#include "t4fw_api.h"
#include "l2t.h"
-#define DRV_VERSION "1.3.0-ko"
-#define DRV_DESC "Chelsio T4 Network Driver"
+#define DRV_VERSION "2.0.0-ko"
+#define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
* Max interrupt hold-off timer value in us. Queues fall back to this value
CH_DEVICE(0x440a, 4),
CH_DEVICE(0x440d, 4),
CH_DEVICE(0x440e, 4),
+ CH_DEVICE(0x5001, 4),
+ CH_DEVICE(0x5002, 4),
+ CH_DEVICE(0x5003, 4),
+ CH_DEVICE(0x5004, 4),
+ CH_DEVICE(0x5005, 4),
+ CH_DEVICE(0x5006, 4),
+ CH_DEVICE(0x5007, 4),
+ CH_DEVICE(0x5008, 4),
+ CH_DEVICE(0x5009, 4),
+ CH_DEVICE(0x500A, 4),
+ CH_DEVICE(0x500B, 4),
+ CH_DEVICE(0x500C, 4),
+ CH_DEVICE(0x500D, 4),
+ CH_DEVICE(0x500E, 4),
+ CH_DEVICE(0x500F, 4),
+ CH_DEVICE(0x5010, 4),
+ CH_DEVICE(0x5011, 4),
+ CH_DEVICE(0x5012, 4),
+ CH_DEVICE(0x5013, 4),
+ CH_DEVICE(0x5401, 4),
+ CH_DEVICE(0x5402, 4),
+ CH_DEVICE(0x5403, 4),
+ CH_DEVICE(0x5404, 4),
+ CH_DEVICE(0x5405, 4),
+ CH_DEVICE(0x5406, 4),
+ CH_DEVICE(0x5407, 4),
+ CH_DEVICE(0x5408, 4),
+ CH_DEVICE(0x5409, 4),
+ CH_DEVICE(0x540A, 4),
+ CH_DEVICE(0x540B, 4),
+ CH_DEVICE(0x540C, 4),
+ CH_DEVICE(0x540D, 4),
+ CH_DEVICE(0x540E, 4),
+ CH_DEVICE(0x540F, 4),
+ CH_DEVICE(0x5410, 4),
+ CH_DEVICE(0x5411, 4),
+ CH_DEVICE(0x5412, 4),
+ CH_DEVICE(0x5413, 4),
{ 0, }
};
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
-/* Since T5 has more num of PFs, using NUM_OF_PF_WITH_SRIOV_T5
- * macro as num_vf array size
+/* Configure the number of PCI-E Virtual Functions that are to be instantiated
+ * on SR-IOV capable Physical Functions.
*/
-static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV_T5];
+static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
module_param_array(num_vf, uint, NULL, 0644);
-MODULE_PARM_DESC(num_vf,
- "number of VFs for each of PFs 0-3 for T4 and PFs 0-7 for T5");
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
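+/* Illustrative example (values are hypothetical, not part of this patch):
+ * using the standard module_param_array syntax, loading the driver with
+ * "modprobe cxgb4 num_vf=4,0,0,2" would request 4 VFs on PF0 and 2 VFs on PF3.
+ */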
#endif
/*
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
- !!(dev->features & NETIF_F_HW_VLAN_RX), true);
+ !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
u8 opcode = ((const struct rss_header *)rsp)->opcode;
rsp++; /* skip RSS header */
+
+ /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+ */
+ if (unlikely(opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
+ rsp++;
+ opcode = ((const struct rss_header *)rsp)->opcode;
+ rsp++;
+ if (opcode != CPL_SGE_EGR_UPDATE) {
+ dev_err(q->adap->pdev_dev,
+ "unexpected FW4/CPL %#x on FW event queue\n", opcode);
+ goto out;
+ }
+ }
+
if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
const struct cpl_sge_egr_update *p = (void *)rsp;
unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
} else
dev_err(q->adap->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
+out:
return 0;
}
{
struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+ /* FW can send CPLs encapsulated in a CPL_FW4_MSG. Advance past the
+ * outer RSS header and the CPL_FW4_MSG header so the ULD handler sees
+ * the encapsulated message as if it had arrived unencapsulated.
+ */
+ if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
+ rsp += 2;
+
if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
rxq->stats.nomem++;
return -1;
netdev_features_t changed = dev->features ^ features;
int err;
- if (!(changed & NETIF_F_HW_VLAN_RX))
+ if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
-1, -1, -1,
- !!(features & NETIF_F_HW_VLAN_RX), true);
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err))
- dev->features = features ^ NETIF_F_HW_VLAN_RX;
+ dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
return err;
}
int ret, ofst;
__be32 data[16];
- if (mem == MEM_MC)
- ret = t4_mc_read(adap, pos, data, NULL);
+ /* mem % MEM_MC selects memory controller 0 (MEM_MC) or 1 (MEM_MC1)
+ * for t4_mc_read().
+ */
+ if ((mem == MEM_MC) || (mem == MEM_MC1))
+ ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
else
ret = t4_edc_read(adap, mem, pos, data, NULL);
if (ret)
static int setup_debugfs(struct adapter *adap)
{
int i;
+ u32 size;
if (IS_ERR_OR_NULL(adap->debugfs_root))
return -1;
i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
- if (i & EDRAM0_ENABLE)
- add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
- if (i & EDRAM1_ENABLE)
- add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
- if (i & EXT_MEM_ENABLE)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+ if (i & EDRAM0_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM0_BAR);
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
+ }
+ if (i & EDRAM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM1_BAR);
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
+ }
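+ /* T4 has a single external memory controller; T5 exposes two (MC0 and
+ * MC1), each with its own enable bit and size register.
+ */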
+ if (is_t4(adap->chip)) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ if (i & EXT_MEM_ENABLE)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(size));
+ } else {
+ if (i & EXT_MEM_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ add_debugfs_mem(adap, "mc0", MEM_MC0,
+ EXT_MEM_SIZE_GET(size));
+ }
+ if (i & EXT_MEM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
+ add_debugfs_mem(adap, "mc1", MEM_MC1,
+ EXT_MEM_SIZE_GET(size));
+ }
+ }
if (adap->l2t)
debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
&t4_l2t_fops);
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
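+
+/* Disable SGE doorbell coalescing for the adapter that owns @dev by setting
+ * the NOCOALESCE bit in SGE_DOORBELL_CONTROL.
+ */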
+void cxgb4_disable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
+ F_NOCOALESCE);
+}
+EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
+
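+/* Re-enable SGE doorbell coalescing by clearing the NOCOALESCE bit in
+ * SGE_DOORBELL_CONTROL.
+ */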
+void cxgb4_enable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+}
+EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
+
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
static void setup_memwin(struct adapter *adap)
{
- u32 bar0;
+ u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
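+ /* T4 expects each memory window base to include the (truncated) BAR0
+ * address; T5 takes only the offset relative to the PCIe BAR.
+ */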
+ if (is_t4(adap->chip)) {
+ mem_win0_base = bar0 + MEMWIN0_BASE;
+ mem_win1_base = bar0 + MEMWIN1_BASE;
+ mem_win2_base = bar0 + MEMWIN2_BASE;
+ } else {
+ /* For T5, only relative offset inside the PCIe BAR is passed */
+ mem_win0_base = MEMWIN0_BASE;
+ mem_win1_base = MEMWIN1_BASE_T5;
+ mem_win2_base = MEMWIN2_BASE_T5;
+ }
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
- (bar0 + MEMWIN0_BASE) | BIR(0) |
+ mem_win0_base | BIR(0) |
WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
- (bar0 + MEMWIN1_BASE) | BIR(0) |
+ mem_win1_base | BIR(0) |
WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
- (bar0 + MEMWIN2_BASE) | BIR(0) |
+ mem_win2_base | BIR(0) |
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
*/
{
int pf, vf;
- int max_no_pf = is_t4(adapter->chip) ? NUM_OF_PF_WITH_SRIOV_T4 :
- NUM_OF_PF_WITH_SRIOV_T5;
- for (pf = 0; pf < max_no_pf; pf++) {
+ for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
if (num_vf[pf] <= 0)
continue;
adap->tids.aftid_end = val[1];
}
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
+ val[0] = 1;
+ (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
+
/*
* Get device capabilities so we can determine what resources we need
* to manage.
if (t4_wait_dev_ready(adap) < 0)
return PCI_ERS_RESULT_DISCONNECT;
- if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
+ /* t4_fw_hello() returns the Master PF's mailbox number (>= 0) on
+ * success, so only a negative return indicates failure.
+ */
+ if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
adap->flags |= FW_OK;
if (adap_init1(adap, &c))
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
-#ifdef CONFIG_PCI_IOV
- int max_no_pf;
-#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
sriov:
#ifdef CONFIG_PCI_IOV
- max_no_pf = is_t4(adapter->chip) ? NUM_OF_PF_WITH_SRIOV_T4 :
- NUM_OF_PF_WITH_SRIOV_T5;
-
- if (func < max_no_pf && num_vf[func] > 0)
+ if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
if (pci_enable_sriov(pdev, num_vf[func]) == 0)
dev_info(&pdev->dev,
"instantiated %u virtual functions\n",