2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/delay.h>
38 #include "t4_values.h"
40 #include "t4fw_version.h"
43 * t4_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time
52 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise.
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 int polarity, int attempts, int delay, u32 *valp)
/* Sample the register once per iteration of the poll loop. */
61 u32 val = t4_read_reg(adapter, reg);
/* Done when the masked field matches the requested polarity.
 * NOTE(review): the success store to *valp and the retry/delay path are
 * elided from this excerpt -- the kernel-doc above states the contract.
 */
63 if (!!(val & mask) == polarity) {
/* Convenience wrapper around t4_wait_op_done_val() for callers that do
 * not need the final register value (presumably passes a NULL @valp in
 * the elided trailing arguments -- confirm against the full source).
 */
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 int polarity, int attempts, int delay)
78 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
83 * t4_set_reg_field - set a register field to a value
84 * @adapter: the adapter to program
85 * @addr: the register address
86 * @mask: specifies the portion of the register to modify
87 * @val: the new value for the register field
89 * Sets a register field specified by the supplied mask to the
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* Read-modify-write: clear the @mask bits, then OR in the new value. */
95 u32 v = t4_read_reg(adapter, addr) & ~mask;
97 t4_write_reg(adapter, addr, v | val);
/* Read back so the posted write reaches the device before we return. */
98 (void) t4_read_reg(adapter, addr); /* flush */
102 * t4_read_indirect - read indirectly addressed registers
104 * @addr_reg: register holding the indirect address
105 * @data_reg: register holding the value of the indirect register
106 * @vals: where the read register values are stored
107 * @nregs: how many indirect registers to read
108 * @start_idx: index of first indirect register to read
110 * Reads registers that are accessed indirectly through an address/data
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 unsigned int data_reg, u32 *vals,
115 unsigned int nregs, unsigned int start_idx)
/* Per-register step: select the index, then read the data register.
 * NOTE(review): the enclosing loop over @nregs is elided in this excerpt.
 */
118 t4_write_reg(adap, addr_reg, start_idx);
119 *vals++ = t4_read_reg(adap, data_reg);
125 * t4_write_indirect - write indirectly addressed registers
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
/* Per-register step: program the index, then the value; the index is
 * post-incremented so consecutive registers are written in order.
 * NOTE(review): the enclosing loop over @nregs is elided in this excerpt.
 */
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism. This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses.
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
/* Build the backdoor request: which PF and which config register. */
154 u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
/* Chip-revision specific request-flag adjustments (bodies elided in
 * this excerpt) for pre-T6 and T4 parts.
 */
156 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
161 if (is_t4(adap->params.chip))
/* Issue the request, then read the returned config-space value. */
164 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
167 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168 * Configuration Space read. (None of the other fields matter when
169 * ENABLE is 0 so a simple register write is easier than a
170 * read-modify-write via t4_set_reg_field().)
172 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
176 * t4_report_fw_error - report firmware error
179 * The adapter firmware can indicate error conditions to the host.
180 * If the firmware has indicated an error, print out the reason for
181 * the firmware error.
183 static void t4_report_fw_error(struct adapter *adap)
/* Human-readable strings indexed by the PCIE_FW_EVAL field value. */
185 static const char *const reason[] = {
186 "Crash", /* PCIE_FW_EVAL_CRASH */
187 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
188 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
189 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
190 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
192 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193 "Reserved", /* reserved */
/* Only log when the firmware has actually latched an error bit. */
197 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198 if (pcie_fw & PCIE_FW_ERR_F)
199 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200 reason[PCIE_FW_EVAL_G(pcie_fw)]);
204 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
206 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
/* Copy @nflit 64-bit flits out of the mailbox; cpu_to_be64() keeps the
 * stored reply in big-endian (wire) order regardless of host endianness.
 */
209 for ( ; nflit; nflit--, mbox_addr += 8)
210 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
214 * Handle a FW assertion reported in a mailbox.
216 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
218 struct fw_debug_cmd asrt;
/* Pull the FW_DEBUG_CMD payload from the mailbox and log the assert's
 * source file, line and the two diagnostic values it carries.
 */
220 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
221 dev_alert(adap->pdev_dev,
222 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
223 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
224 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
228 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
229 * @adapter: the adapter
230 * @cmd: the Firmware Mailbox Command or Reply
231 * @size: command length in bytes
232 * @access: the time (ms) needed to access the Firmware Mailbox
233 * @execute: the time (ms) the command spent being executed
235 static void t4_record_mbox(struct adapter *adapter,
236 const __be64 *cmd, unsigned int size,
237 int access, int execute)
239 struct mbox_cmd_log *log = adapter->mbox_log;
240 struct mbox_cmd *entry;
/* Claim the next slot in the circular log; the wrap of the cursor back
 * to zero is elided from this excerpt.
 */
243 entry = mbox_cmd_log_entry(log, log->cursor++);
244 if (log->cursor == log->size)
/* Copy the command flits host-endian into the entry; the tail-padding
 * loop body (zero-filling up to MBOX_LEN/8) is elided here.
 */
247 for (i = 0; i < size / 8; i++)
248 entry->cmd[i] = be64_to_cpu(cmd[i]);
249 while (i < MBOX_LEN / 8)
/* Stamp the entry with time, sequence number and the timing stats. */
251 entry->timestamp = jiffies;
252 entry->seqno = log->seqno++;
253 entry->access = access;
254 entry->execute = execute;
258 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
260 * @mbox: index of the mailbox to use
261 * @cmd: the command to write
262 * @size: command length in bytes
263 * @rpl: where to optionally store the reply
264 * @sleep_ok: if true we may sleep while awaiting command completion
265 * @timeout: time to wait for command to finish before timing out
267 * Sends the given command to FW through the selected mailbox and waits
268 * for the FW to execute the command. If @rpl is not %NULL it is used to
269 * store the FW's reply to the command. The command and its optional
270 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
271 * to respond. @sleep_ok determines whether we may sleep while awaiting
272 * the response. If sleeping is allowed we use progressive backoff
275 * The return value is 0 on success or a negative errno on failure. A
276 * failure can happen either because we are not able to execute the
277 * command or FW executes it but signals an error. In the latter case
278 * the return value is the error code indicated by FW (negated).
280 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
281 int size, void *rpl, bool sleep_ok, int timeout)
/* Progressive backoff schedule (ms); the last entry repeats. */
283 static const int delay[] = {
284 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
287 struct mbox_list entry;
292 int i, ms, delay_idx, ret;
293 const __be64 *p = cmd;
294 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
295 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
296 __be64 cmd_rpl[MBOX_LEN / 8];
/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
299 if ((size & 15) || size > MBOX_LEN)
303 * If the device is off-line, as in EEH, commands will time out.
304 * Fail them early so we don't waste time waiting.
306 if (adap->pdev->error_state != pci_channel_io_normal)
309 /* If we have a negative timeout, that implies that we can't sleep. */
315 /* Queue ourselves onto the mailbox access list. When our entry is at
316 * the front of the list, we have rights to access the mailbox. So we
317 * wait [for a while] till we're at the front [or bail out with an
320 spin_lock(&adap->mbox_lock);
321 list_add_tail(&entry.list, &adap->mlist.list);
322 spin_unlock(&adap->mbox_lock);
327 for (i = 0; ; i += ms) {
328 /* If we've waited too long, return a busy indication. This
329 * really ought to be based on our initial position in the
330 * mailbox access list but this is a start. We very rarely
331 * contend on access to the mailbox ...
333 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
334 if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
/* Give up: unlink ourselves from the wait list before returning. */
335 spin_lock(&adap->mbox_lock);
336 list_del(&entry.list);
337 spin_unlock(&adap->mbox_lock);
338 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
339 t4_record_mbox(adap, cmd, size, access, ret);
343 /* If we're at the head, break out and start the mailbox
346 if (list_first_entry(&adap->mlist.list, struct mbox_list,
350 /* Delay for a bit before checking again ... */
352 ms = delay[delay_idx]; /* last element may repeat */
353 if (delay_idx < ARRAY_SIZE(delay) - 1)
361 /* Loop trying to get ownership of the mailbox. Return an error
362 * if we can't gain ownership.
364 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
365 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
366 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
367 if (v != MBOX_OWNER_DRV) {
/* Couldn't get ownership: -EBUSY if FW holds it, -ETIMEDOUT otherwise. */
368 spin_lock(&adap->mbox_lock);
369 list_del(&entry.list);
370 spin_unlock(&adap->mbox_lock);
371 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
372 t4_record_mbox(adap, cmd, size, access, ret);
376 /* Copy in the new mailbox command and send it on its way ... */
377 t4_record_mbox(adap, cmd, size, access, 0);
378 for (i = 0; i < size; i += 8)
379 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
/* Hand the mailbox to the firmware and flush the doorbell write. */
381 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
382 t4_read_reg(adap, ctl_reg); /* flush write */
/* Poll for completion (loop header partly elided): stop early if the
 * firmware reports a fatal error via PCIE_FW.
 */
388 !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
392 ms = delay[delay_idx]; /* last element may repeat */
393 if (delay_idx < ARRAY_SIZE(delay) - 1)
399 v = t4_read_reg(adap, ctl_reg);
400 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
/* Ownership came back without a valid message: spurious; clear it. */
401 if (!(v & MBMSGVALID_F)) {
402 t4_write_reg(adap, ctl_reg, 0);
/* Got a reply: copy it out and decode the FW return value. */
406 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
407 res = be64_to_cpu(cmd_rpl[0]);
/* A FW_DEBUG_CMD in the reply slot is an in-band firmware assert. */
409 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
410 fw_asrt(adap, data_reg);
411 res = FW_CMD_RETVAL_V(EIO);
413 memcpy(rpl, cmd_rpl, size);
416 t4_write_reg(adap, ctl_reg, 0);
419 t4_record_mbox(adap, cmd_rpl,
420 MBOX_LEN, access, execute);
421 spin_lock(&adap->mbox_lock);
422 list_del(&entry.list);
423 spin_unlock(&adap->mbox_lock);
/* Negate the FW retval field to produce a Linux errno. */
424 return -FW_CMD_RETVAL_G((int)res);
/* Fell out of the poll loop: command timed out or firmware died. */
428 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
429 t4_record_mbox(adap, cmd, size, access, ret);
430 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
431 *(const u8 *)cmd, mbox);
432 t4_report_fw_error(adap);
433 spin_lock(&adap->mbox_lock);
434 list_del(&entry.list);
435 spin_unlock(&adap->mbox_lock);
/* Wrapper around t4_wr_mbox_meat_timeout() using the default timeout
 * (the trailing timeout argument is elided from this excerpt).
 */
440 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
441 void *rpl, bool sleep_ok)
443 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
/* Dump EDC ECC error information for EDC controller @idx (0 or 1).
 * Not supported on T4 silicon; uses the T5+ register layout.
 */
447 static int t4_edc_err_read(struct adapter *adap, int idx)
449 u32 edc_ecc_err_addr_reg;
452 if (is_t4(adap->params.chip)) {
453 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
456 if (idx != 0 && idx != 1) {
457 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
/* Compute the per-controller register addresses for this EDC. */
461 edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
462 rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
465 "edc%d err addr 0x%x: 0x%x.\n",
466 idx, edc_ecc_err_addr_reg,
467 t4_read_reg(adap, edc_ecc_err_addr_reg))
469 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
/* Dump the nine consecutive 64-bit BIST status data words. */
471 (unsigned long long)t4_read_reg64(adap, rdata_reg),
472 (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
473 (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
474 (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
475 (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
476 (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
477 (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
478 (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
479 (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
485 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
487 * @win: PCI-E Memory Window to use
488 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
489 * @addr: address within indicated memory type
490 * @len: amount of memory to transfer
491 * @hbuf: host memory buffer
492 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
494 * Reads/writes an [almost] arbitrary memory region in the firmware: the
495 * firmware memory address and host buffer must be aligned on 32-bit
496 * boundaries; the length may be arbitrary. The memory is transferred as
497 * a raw byte sequence from/to the firmware's memory. If this memory
498 * contains data structures which contain multi-byte integers, it's the
499 * caller's responsibility to perform appropriate byte order conversions.
501 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
502 u32 len, void *hbuf, int dir)
504 u32 pos, offset, resid, memoffset;
505 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
508 /* Argument sanity checks ...
510 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
514 /* It's convenient to be able to handle lengths which aren't a
515 * multiple of 32-bits because we often end up transferring files to
516 * the firmware. So we'll handle that by normalizing the length here
517 * and then handling any residual transfer at the end.
522 /* Offset into the region of memory which is being accessed
525 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
526 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
528 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
529 if (mtype != MEM_MC1)
530 memoffset = (mtype * (edc_size * 1024 * 1024));
/* MEM_MC1 lives after both EDCs and MC0, so add MC0's size too. */
532 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
533 MA_EXT_MEMORY0_BAR_A));
534 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
537 /* Determine the PCIE_MEM_ACCESS_OFFSET */
538 addr = addr + memoffset;
540 /* Each PCI-E Memory Window is programmed with a window size -- or
541 * "aperture" -- which controls the granularity of its mapping onto
542 * adapter memory. We need to grab that aperture in order to know
543 * how to use the specified window. The window is also programmed
544 * with the base address of the Memory Window in BAR0's address
545 * space. For T4 this is an absolute PCI-E Bus Address. For T5
546 * the address is relative to BAR0.
548 mem_reg = t4_read_reg(adap,
549 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
551 mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
552 mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
/* T4 windows hold absolute bus addresses; convert to a BAR0 offset. */
553 if (is_t4(adap->params.chip))
554 mem_base -= adap->t4_bar0;
555 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
557 /* Calculate our initial PCI-E Memory Window Position and Offset into
560 pos = addr & ~(mem_aperture-1);
563 /* Set up initial PCI-E Memory Window to cover the start of our
564 * transfer. (Read it back to ensure that changes propagate before we
565 * attempt to use the new value.)
568 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
571 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
573 /* Transfer data to/from the adapter as long as there's an integral
574 * number of 32-bit transfers to complete.
576 * A note on Endianness issues:
578 * The "register" reads and writes below from/to the PCI-E Memory
579 * Window invoke the standard adapter Big-Endian to PCI-E Link
580 * Little-Endian "swizzle." As a result, if we have the following
581 * data in adapter memory:
583 * Memory: ... | b0 | b1 | b2 | b3 | ...
584 * Address: i+0 i+1 i+2 i+3
586 * Then a read of the adapter memory via the PCI-E Memory Window
591 * [ b3 | b2 | b1 | b0 ]
593 * If this value is stored into local memory on a Little-Endian system
594 * it will show up correctly in local memory as:
596 * ( ..., b0, b1, b2, b3, ... )
598 * But on a Big-Endian system, the store will show up in memory
599 * incorrectly swizzled as:
601 * ( ..., b3, b2, b1, b0, ... )
603 * So we need to account for this in the reads and writes to the
604 * PCI-E Memory Window below by undoing the register read/write
608 if (dir == T4_MEMORY_READ)
609 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
612 t4_write_reg(adap, mem_base + offset,
613 (__force u32)cpu_to_le32(*buf++));
614 offset += sizeof(__be32);
615 len -= sizeof(__be32);
617 /* If we've reached the end of our current window aperture,
618 * move the PCI-E Memory Window on to the next. Note that
619 * doing this here after "len" may be 0 allows us to set up
620 * the PCI-E Memory Window for a possible final residual
623 if (offset == mem_aperture) {
627 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
630 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
635 /* If the original transfer had a length which wasn't a multiple of
636 * 32-bits, now's where we need to finish off the transfer of the
637 * residual amount. The PCI-E Memory Window has already been moved
638 * above (if necessary) to cover this final transfer.
648 if (dir == T4_MEMORY_READ) {
/* Partial-word read: copy only the trailing bytes the caller wants. */
649 last.word = le32_to_cpu(
650 (__force __le32)t4_read_reg(adap,
652 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
653 bp[i] = last.byte[i];
/* Partial-word write: merge caller bytes with the existing word. */
656 for (i = resid; i < 4; i++)
658 t4_write_reg(adap, mem_base + offset,
659 (__force u32)cpu_to_le32(last.word));
666 /* Return the specified PCI-E Configuration Space register from our Physical
667 * Function. We try first via a Firmware LDST Command since we prefer to let
668 * the firmware own all of these registers, but if that fails we go for it
669 * directly ourselves.
671 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
673 u32 val, ldst_addrspace;
675 /* If fw_attach != 0, construct and send the Firmware LDST Command to
676 * retrieve the specified PCI-E Configuration Space register.
678 struct fw_ldst_cmd ldst_cmd;
/* Build a single-access (NACCESS=1) FUNC_PCIE LDST read for @reg. */
681 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
682 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
683 ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
687 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
688 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
689 ldst_cmd.u.pcie.ctrl_to_fn =
690 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
691 ldst_cmd.u.pcie.r = reg;
693 /* If the LDST Command succeeds, return the result, otherwise
694 * fall through to reading it directly ourselves ...
696 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
699 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
701 /* Read the desired Configuration Space register via the PCI-E
702 * Backdoor mechanism.
704 t4_hw_pci_read_cfg4(adap, reg, &val);
708 /* Get the window based on base passed to it.
709 * Window aperture is currently unhandled, but there is no use case for it
712 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
717 if (is_t4(adap->params.chip)) {
720 /* Truncation intentional: we only read the bottom 32-bits of
721 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
722 * mechanism to read BAR0 instead of using
723 * pci_resource_start() because we could be operating from
724 * within a Virtual Machine which is trapping our accesses to
725 * our Configuration Space and we need to set up the PCI-E
726 * Memory Window decoders with the actual addresses which will
727 * be coming across the PCI-E link.
729 bar0 = t4_read_pcie_cfg4(adap, pci_base);
/* Cache BAR0 so t4_memory_rw() can convert absolute addresses. */
731 adap->t4_bar0 = bar0;
733 ret = bar0 + memwin_base;
735 /* For T5, only relative offset inside the PCIe BAR is passed */
741 /* Get the default utility window (win0) used by everyone */
742 u32 t4_get_util_window(struct adapter *adap)
/* Window 0 is anchored at BAR0 + MEMWIN0_BASE (T4: absolute bus addr). */
744 return t4_get_window(adap, PCI_BASE_ADDRESS_0,
745 PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
748 /* Set up memory window for accessing adapter memory ranges. (Read
749 * back MA register to ensure that changes propagate before we attempt
750 * to use the new values.)
752 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
/* Program the window's base, BIR 0 and aperture (MEMWIN0_APERTURE). */
755 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
756 memwin_base | BIR_V(0) |
757 WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
/* Read back to flush the posted write before the window is used. */
759 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
763 * t4_get_regs_len - return the size of the chips register set
764 * @adapter: the adapter
766 * Returns the size of the chip's BAR0 register space.
768 unsigned int t4_get_regs_len(struct adapter *adapter)
770 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
/* Per-generation register map sizes; the case labels are elided in
 * this excerpt (T4 and T5-family groupings).
 */
772 switch (chip_version) {
774 return T4_REGMAP_SIZE;
778 return T5_REGMAP_SIZE;
781 dev_err(adapter->pdev_dev,
782 "Unsupported chip version %d\n", chip_version);
787 * t4_get_regs - read chip registers into provided buffer
789 * @buf: register buffer
790 * @buf_size: size (in bytes) of register buffer
792 * If the provided register buffer isn't large enough for the chip's
793 * full register range, the register dump will be truncated to the
794 * register buffer's size.
796 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
/* Per-chip tables of (first, last) register-address pairs; the table
 * contents (hundreds of entries) are elided from this excerpt.
 */
798 static const unsigned int t4_reg_ranges[] = {
1257 static const unsigned int t5_reg_ranges[] = {
2024 static const unsigned int t6_reg_ranges[] = {
2585 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2586 const unsigned int *reg_ranges;
2587 int reg_ranges_size, range;
2588 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2590 /* Select the right set of register ranges to dump depending on the
2591 * adapter chip type.
2593 switch (chip_version) {
2595 reg_ranges = t4_reg_ranges;
2596 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2600 reg_ranges = t5_reg_ranges;
2601 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2605 reg_ranges = t6_reg_ranges;
2606 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2610 dev_err(adap->pdev_dev,
2611 "Unsupported chip version %d\n", chip_version);
2615 /* Clear the register buffer and insert the appropriate register
2616 * values selected by the above register ranges.
2618 memset(buf, 0, buf_size);
2619 for (range = 0; range < reg_ranges_size; range += 2) {
2620 unsigned int reg = reg_ranges[range];
2621 unsigned int last_reg = reg_ranges[range + 1];
/* Each register lands at its own address offset inside the dump. */
2622 u32 *bufp = (u32 *)((char *)buf + reg);
2624 /* Iterate across the register range filling in the register
2625 * buffer but don't write past the end of the register buffer.
2627 while (reg <= last_reg && bufp < buf_end) {
2628 *bufp++ = t4_read_reg(adap, reg);
/* EEPROM address of the status/write-protect word (see t4_seeprom_wp()). */
2634 #define EEPROM_STAT_ADDR 0x7bfc
/* Total size of the adapter's VPD area reported to the PCI core. */
2635 #define VPD_SIZE 0x800
/* Offset of the VPD data on current cards ... */
2636 #define VPD_BASE 0x400
/* ... and on early cards, which kept it at offset 0. */
2637 #define VPD_BASE_OLD 0
/* Number of VPD bytes read and parsed by t4_get_raw_vpd_params(). */
2638 #define VPD_LEN 1024
/* PCI-SIG large-resource ID-string tag that starts a Chelsio VPD. */
2639 #define CHELSIO_VPD_UNIQUE_ID 0x82
2642 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2643 * @phys_addr: the physical EEPROM address
2644 * @fn: the PCI function number
2645 * @sz: size of function-specific area
2647 * Translate a physical EEPROM address to virtual. The first 1K is
2648 * accessed through virtual addresses starting at 31K, the rest is
2649 * accessed through virtual addresses starting at 0.
2651 * The mapping is as follows:
2652 * [0..1K) -> [31K..32K)
2653 * [1K..1K+A) -> [31K-A..31K)
2654 * [1K+A..ES) -> [0..ES-A-1K)
2656 * where A = @fn * @sz, and ES = EEPROM size.
2658 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
/* NOTE(review): the comparisons below use @fn directly as the byte
 * size A; the "fn *= sz" scaling step appears to be elided from this
 * excerpt -- confirm against the full source.
 */
2661 if (phys_addr < 1024)
2662 return phys_addr + (31 << 10);
2663 if (phys_addr < 1024 + fn)
2664 return 31744 - fn + phys_addr - 1024;
2665 if (phys_addr < EEPROMSIZE)
2666 return phys_addr - 1024 - fn;
2671 * t4_seeprom_wp - enable/disable EEPROM write protection
2672 * @adapter: the adapter
2673 * @enable: whether to enable or disable write protection
2675 * Enables or disables write protection on the serial EEPROM.
2677 int t4_seeprom_wp(struct adapter *adapter, bool enable)
/* 0xc sets the block-protect bits in the status word; 0 clears them. */
2679 unsigned int v = enable ? 0xc : 0;
2680 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
/* pci_write_vpd() returns bytes written on success; map that to 0. */
2681 return ret < 0 ? ret : 0;
2685 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2686 * @adapter: adapter to read
2687 * @p: where to store the parameters
2689 * Reads card parameters stored in VPD EEPROM.
2691 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2693 int i, ret = 0, addr;
2696 unsigned int vpdr_len, kw_offset, id_len;
/* Scratch buffer for the raw VPD image (freed before returning). */
2698 vpd = vmalloc(VPD_LEN);
2702 /* We have two VPD data structures stored in the adapter VPD area.
2703 * By default, Linux calculates the size of the VPD area by traversing
2704 * the first VPD area at offset 0x0, so we need to tell the OS what
2705 * our real VPD size is.
2707 ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2711 /* Card information normally starts at VPD_BASE but early cards had
2714 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2718 /* The VPD shall have a unique identifier specified by the PCI SIG.
2719 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2720 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2721 * is expected to automatically put this entry at the
2722 * beginning of the VPD.
2724 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2726 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2730 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2731 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
/* Clamp the ID string length to what struct vpd_params can hold. */
2736 id_len = pci_vpd_lrdt_size(vpd);
2737 if (id_len > ID_LEN)
2740 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2742 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2747 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2748 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2749 if (vpdr_len + kw_offset > VPD_LEN) {
2750 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
/* Helper: locate keyword @name in the VPD-R section and advance @var
 * past the info-field header to the keyword's data, or bail out on a
 * missing keyword (error path partially elided in this excerpt).
 */
2755 #define FIND_VPD_KW(var, name) do { \
2756 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2758 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2762 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
/* Verify the "RV" checksum: all bytes up to and including RV must sum
 * to zero (mod 256).
 */
2765 FIND_VPD_KW(i, "RV");
2766 for (csum = 0; i >= 0; i--)
2770 dev_err(adapter->pdev_dev,
2771 "corrupted VPD EEPROM, actual csum %u\n", csum);
/* Locate the standard identification keywords. */
2776 FIND_VPD_KW(ec, "EC");
2777 FIND_VPD_KW(sn, "SN");
2778 FIND_VPD_KW(pn, "PN");
2779 FIND_VPD_KW(na, "NA");
/* Copy the fields into @p, bounded by each destination's capacity. */
2782 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2784 memcpy(p->ec, vpd + ec, EC_LEN);
2786 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2787 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2789 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2790 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2792 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2793 strim((char *)p->na);
2797 return ret < 0 ? ret : 0;
2801 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2802 * @adapter: adapter to read
2803 * @p: where to store the parameters
2805 * Reads card parameters stored in VPD EEPROM and retrieves the Core
2806 * Clock. This can only be called after a connection to the firmware
2809 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2811 u32 cclk_param, cclk_val;
2814 /* Grab the raw VPD parameters.
2816 ret = t4_get_raw_vpd_params(adapter, p);
2820 /* Ask firmware for the Core Clock since it knows how to translate the
2821 * Reference Clock ('V2') VPD field into a Core Clock value ...
2823 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2824 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2825 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2826 1, &cclk_param, &cclk_val);
2835 /* serial flash and firmware constants */
2837 SF_ATTEMPTS = 10, /* max retries for SF operations */
2839 /* flash command opcodes */
2840 SF_PROG_PAGE = 2, /* program page */
2841 SF_WR_DISABLE = 4, /* disable writes */
2842 SF_RD_STATUS = 5, /* read status register */
2843 SF_WR_ENABLE = 6, /* enable writes */
2844 SF_RD_DATA_FAST = 0xb, /* read flash */
2845 SF_RD_ID = 0x9f, /* read ID */
2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */
/* Firmware image ceiling, expressed in flash sectors.
 * NOTE(review): the enum braces and some members (e.g. SF_SEC_SIZE)
 * are elided from this excerpt.
 */
2848 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
2852 * sf1_read - read data from the serial flash
2853 * @adapter: the adapter
2854 * @byte_cnt: number of bytes to read
2855 * @cont: whether another operation will be chained
2856 * @lock: whether to lock SF for PL access only
2857 * @valp: where to store the read data
2859 * Reads up to 4 bytes of data from the serial flash. The location of
2860 * the read needs to be specified prior to calling this by issuing the
2861 * appropriate commands to the serial flash.
2863 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2864 int lock, u32 *valp)
/* SF_DATA is a 4-byte register, so reads are limited to 1..4 bytes. */
2868 if (!byte_cnt || byte_cnt > 4)
/* Refuse to start a new operation while a previous one is in flight. */
2870 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2872 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2873 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
/* Wait for SF_BUSY to clear, then fetch the data that came back. */
2874 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2876 *valp = t4_read_reg(adapter, SF_DATA_A);
2881 * sf1_write - write data to the serial flash
2882 * @adapter: the adapter
2883 * @byte_cnt: number of bytes to write
2884 * @cont: whether another operation will be chained
2885 * @lock: whether to lock SF for PL access only
2886 * @val: value to write
2888 * Writes up to 4 bytes of data to the serial flash. The location of
2889 * the write needs to be specified prior to calling this by issuing the
2890 * appropriate commands to the serial flash.
2892 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* SF_DATA is a 4-byte register, so writes are limited to 1..4 bytes. */
2895 if (!byte_cnt || byte_cnt > 4)
2897 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
/* Stage the data, then kick off the operation (OP_V(1) == write). */
2899 t4_write_reg(adapter, SF_DATA_A, val);
2900 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2901 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2902 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2906 * flash_wait_op - wait for a flash operation to complete
2907 * @adapter: the adapter
2908 * @attempts: max number of polls of the status register
2909 * @delay: delay between polls in ms
2911 * Wait for a flash operation to complete by polling the status register.
2913 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Each poll issues SF_RD_STATUS and reads back one status byte; the
 * busy-bit test and delay between polls are elided in this excerpt.
 */
2919 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
2920 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
2924 if (--attempts == 0)
/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.  @addr must be 32-bit aligned and the range
 *	must lie within the flash.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Fast-read command followed by the 24-bit flash address */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	The page is read back afterwards to verify the write succeeded.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Page-program command followed by the 24-bit flash address */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload 4 bytes at a time, big-endian within a word */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data was advanced past the payload above; rewind by n to compare */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */
	return ret;
}
/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.  The version word is read in the
 *	platform's natural endianness (byte_oriented == 0).
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}
/**
 *	t4_get_bs_version - read the firmware bootstrap version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW Bootstrap version from flash.
 */
int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}
/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.  The version lives in
 *	the firmware header alongside the main FW version.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}
/**
 *	t4_get_exprom_version - return the Expansion ROM version (if any)
 *	@adap: the adapter
 *	@vers: where to place the version
 *
 *	Reads the Expansion ROM header from FLASH and returns the version
 *	number (if present) through the @vers return value pointer.  We return
 *	this in the Firmware Version Format since it's convenient.  Return
 *	0 on success, -ENOENT if no Expansion ROM is present.
 */
int t4_get_exprom_version(struct adapter *adap, u32 *vers)
{
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hdr;
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
					   sizeof(u32))];
	int ret;

	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
			    0);
	if (ret)
		return ret;

	hdr = (struct exprom_header *)exprom_header_buf;
	/* PCI Expansion ROM signature is the 0x55 0xaa magic pair */
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
		return -ENOENT;

	/* Repackage the four version bytes in Firmware Version Format */
	*vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
		 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
		 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
		 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
	return 0;
}
/**
 *	t4_get_vpd_version - return the VPD version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the VPD via the Firmware interface (thus this can only be called
 *	once we're ready to issue Firmware commands).  The format of the
 *	VPD version is adapter specific.  Returns 0 on success, an error on
 *	failure.
 *
 *	Note that early versions of the Firmware didn't include the ability
 *	to retrieve the VPD version, so we zero-out the return-value parameter
 *	in that case to avoid leaving it with garbage in it.
 *
 *	Also note that the Firmware will return its cached copy of the VPD
 *	Revision ID, not the actual Revision ID as written in the Serial
 *	EEPROM.  This is only an issue if a new VPD has been written and the
 *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
 *	to defer calling this routine till after a FW_RESET_CMD has been issued
 *	if the Host Driver will be performing a full adapter initialization.
 */
int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
{
	u32 vpdrev_param;
	int ret;

	vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &vpdrev_param, vers);
	if (ret)
		*vers = 0;	/* old FW: don't leave garbage behind */
	return ret;
}
/**
 *	t4_get_scfg_version - return the Serial Configuration version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the Serial Configuration Version via the Firmware interface
 *	(thus this can only be called once we're ready to issue Firmware
 *	commands).  The format of the Serial Configuration version is
 *	adapter specific.  Returns 0 on success, an error on failure.
 *
 *	Note that early versions of the Firmware didn't include the ability
 *	to retrieve the Serial Configuration version, so we zero-out the
 *	return-value parameter in that case to avoid leaving it with
 *	garbage in it.
 *
 *	Also note that the Firmware will return its cached copy of the Serial
 *	Initialization Revision ID, not the actual Revision ID as written in
 *	the Serial EEPROM.  This is only an issue if a new VPD has been written
 *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
 *	it's best to defer calling this routine till after a FW_RESET_CMD has
 *	been issued if the Host Driver will be performing a full adapter
 *	initialization.
 */
int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
{
	u32 scfgrev_param;
	int ret;

	scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &scfgrev_param, vers);
	if (ret)
		*vers = 0;	/* old FW: don't leave garbage behind */
	return ret;
}
/**
 *	t4_get_version_info - extract various chip/firmware version information
 *	@adapter: the adapter
 *
 *	Reads various chip/firmware version numbers and stores them into the
 *	adapter Adapter Parameters structure.  If any of the efforts fails
 *	the first failure will be returned, but all of the version numbers
 *	will be read.
 */
int t4_get_version_info(struct adapter *adapter)
{
	int ret = 0;

	/* Record only the FIRST error so every reader below still runs */
	#define FIRST_RET(__getvinfo) \
	do { \
		int __ret = __getvinfo; \
		if (__ret && !ret) \
			ret = __ret; \
	} while (0)

	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));

	#undef FIRST_RET
	return ret;
}
/**
 *	t4_dump_version_info - dump all of the adapter configuration IDs
 *	@adapter: the adapter
 *
 *	Dumps all of the various bits of adapter configuration version/revision
 *	IDs information.  This is typically called at some point after
 *	t4_get_version_info() has been called.
 */
void t4_dump_version_info(struct adapter *adapter)
{
	/* Device information */
	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
		 adapter->params.vpd.id,
		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
		 adapter->params.vpd.sn, adapter->params.vpd.pn);

	/* Firmware Version */
	if (!adapter->params.fw_vers)
		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
	else
		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));

	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
	else
		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));

	/* TP Microcode Version */
	if (!adapter->params.tp_vers)
		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "TP Microcode version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));

	/* Expansion ROM version */
	if (!adapter->params.er_vers)
		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "Expansion ROM version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));

	/* Serial Configuration version */
	dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
		 adapter->params.scfg_vers);

	/* VPD Version */
	dev_info(adapter->pdev_dev, "VPD version: %#x\n",
		 adapter->params.vpd_vers);
}
/**
 *	t4_check_fw_version - check if the FW is supported with this driver
 *	@adap: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's exact match, a negative error if the version could not be
 *	read or there's a major version mismatch
 */
int t4_check_fw_version(struct adapter *adap)
{
	int i, ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);

	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	/* Try multiple times before returning error */
	for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);

	/* Pick the per-chip minimum supported firmware version */
	switch (chip_version) {
	case CHELSIO_T4:
		exp_major = T4FW_MIN_VERSION_MAJOR;
		exp_minor = T4FW_MIN_VERSION_MINOR;
		exp_micro = T4FW_MIN_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_MIN_VERSION_MAJOR;
		exp_minor = T5FW_MIN_VERSION_MINOR;
		exp_micro = T5FW_MIN_VERSION_MICRO;
		break;
	case CHELSIO_T6:
		exp_major = T6FW_MIN_VERSION_MAJOR;
		exp_minor = T6FW_MIN_VERSION_MINOR;
		exp_micro = T6FW_MIN_VERSION_MICRO;
		break;
	default:
		dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
			adap->params.chip);
		return -EINVAL;
	}

	/* Lexicographic major.minor.micro comparison against the minimum */
	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
		dev_err(adap->pdev_dev,
			"Card has firmware version %u.%u.%u, minimum "
			"supported firmware is %u.%u.%u.\n", major, minor,
			micro, exp_major, exp_minor, exp_micro);
		return -EFAULT;
	}
	return 0;
}
/* Is the given firmware API compatible with the one the driver was compiled
 * with?  Two headers are compatible when they are for the same chip and
 * either carry the identical version or agree on every interface version.
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.  @k is the filesystem firmware version,
 * @c the version currently on the card (both host-endian).
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}
/* Decide which firmware to run: the image already on the card or the one
 * supplied from the filesystem (@fw_data, may be NULL).  Upgrades the card
 * if the filesystem image is usable and newer (or the card image is not
 * usable) and the device is still uninitialized.  On success the adapter's
 * cached fw/tp version parameters reflect whatever ends up on the card.
 * NOTE(review): return conventions here are inverted in places
 * (-t4_read_flash()/-t4_fw_upgrade() and a positive EINVAL) — this matches
 * the historical code; callers negate again.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		*card_fw = *fs_fw;
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.  Stops at the first
 *	sector that fails; the SF interface is unlocked on all exit paths.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		/* Write-Enable, then Sector-Erase with the sector address in
		 * bits 8+, then wait for the (slow) erase to finish.
		 */
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */
	return ret;
}
/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored.  1MB-flash parts (FPGA) use a different layout.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}
/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort.
 */
static bool t4_fw_matches_chip(const struct adapter *adap,
			       const struct fw_hdr *hdr)
{
	/* The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
	 */
	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
		return true;

	dev_err(adap->pdev_dev,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
	return false;
}
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size in bytes
 *
 *	Write the supplied firmware image to the card's serial flash.  The
 *	image is validated (non-empty, 512-byte multiple, header-declared
 *	length, maximum size, chip match, ones-complement checksum) before
 *	any flash sector is touched.  The real fw_ver word is written last
 *	so a partially-written image is detectable as bad.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* Image-wide ones-complement checksum must come out all-ones */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Finally patch in the real version word */
	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	else
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	return ret;
}
/**
 *	t4_phy_fw_ver - return current PHY firmware version
 *	@adap: the adapter
 *	@phy_fw_ver: return value buffer for PHY firmware version
 *
 *	Returns the current version of external PHY firmware on the
 *	adapter, queried via the firmware PARAMS interface.  Returns 0 on
 *	success, a negative error on failure.
 */
int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
{
	u32 param, val;
	int ret;

	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
	if (ret < 0)
		return ret;
	*phy_fw_ver = val;
	return 0;
}
/**
 *	t4_load_phy_fw - download port PHY firmware
 *	@adap: the adapter
 *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
 *	@win_lock: the lock to use to guard the memory copy
 *	@phy_fw_version: function to check PHY firmware versions
 *	@phy_fw_data: the PHY firmware image to write
 *	@phy_fw_size: image size
 *
 *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
 *	@phy_fw_version is supplied, then it will be used to determine if
 *	it's necessary to perform the transfer by comparing the version
 *	of any existing adapter PHY firmware with that of the passed in
 *	PHY firmware image.  If @win_lock is non-NULL then it will be used
 *	around the call to t4_memory_rw() which transfers the PHY firmware
 *	to the adapter.
 *
 *	A negative error number will be returned if an error occurs.  If
 *	version number support is available and there's no need to upgrade
 *	the firmware, 0 will be returned.  If firmware is successfully
 *	transferred to the adapter, 1 will be returned.
 *
 *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
 *	a result, a RESET of the adapter would cause that RAM to lose its
 *	contents.  Thus, loading PHY firmware on such adapters must happen
 *	after any FW_RESET_CMDs ...
 */
int t4_load_phy_fw(struct adapter *adap,
		   int win, spinlock_t *win_lock,
		   int (*phy_fw_version)(const u8 *, size_t),
		   const u8 *phy_fw_data, size_t phy_fw_size)
{
	unsigned long mtype = 0, maddr = 0;
	u32 param, val;
	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
	int ret;

	/* If we have version number support, then check to see if the adapter
	 * already has up-to-date PHY firmware loaded.
	 */
	if (phy_fw_version) {
		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
		if (ret < 0)
			return ret;

		if (cur_phy_fw_ver >= new_phy_fw_vers) {
			CH_WARN(adap, "PHY Firmware already up-to-date, "
				"version %#x\n", cur_phy_fw_ver);
			return 0;
		}
	}

	/* Ask the firmware where it wants us to copy the PHY firmware image.
	 * The size of the file requires a special version of the READ command
	 * which will pass the file size via the values field in PARAMS_CMD and
	 * retrieve the return value from firmware and place it in the same
	 * buffer values
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
	val = phy_fw_size;
	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
				 &param, &val, 1, true);
	if (ret < 0)
		return ret;
	/* FW returns memory type in bits 8+ and 64KB-unit offset in bits 0-7 */
	mtype = val >> 8;
	maddr = (val & 0xff) << 16;

	/* Copy the supplied PHY Firmware image to the adapter memory location
	 * allocated by the adapter firmware.
	 */
	if (win_lock)
		spin_lock_bh(win_lock);
	ret = t4_memory_rw(adap, win, mtype, maddr,
			   phy_fw_size, (__be32 *)phy_fw_data,
			   T4_MEMORY_WRITE);
	if (win_lock)
		spin_unlock_bh(win_lock);
	if (ret)
		return ret;

	/* Tell the firmware that the PHY firmware image has been written to
	 * RAM and it can now start copying it over to the PHYs.  The chip
	 * firmware will RESET the affected PHYs as part of this operation
	 * leaving them running the new PHY firmware image.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val, 30000);

	/* If we have version number support, then check to see that the new
	 * firmware got loaded properly.
	 */
	if (phy_fw_version) {
		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
		if (ret < 0)
			return ret;

		if (cur_phy_fw_ver != new_phy_fw_vers) {
			CH_WARN(adap, "PHY Firmware did not update: "
				"version on adapter %#x, "
				"version flashed %#x\n",
				cur_phy_fw_ver, new_phy_fw_vers);
			return -ENXIO;
		}
	}

	return 1;
}
/**
 *	t4_fwcache - firmware cache operation
 *	@adap: the adapter
 *	@op  : the operation (flush or flush and invalidate)
 *
 *	Issues a FW_PARAMS_CMD write of the FWCACHE device parameter to
 *	request the given cache operation from the firmware.
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn =
		cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
			    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
			    FW_PARAMS_CMD_PFN_V(adap->pf) |
			    FW_PARAMS_CMD_VFN_V(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.param[0].mnem =
		cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = (__force __be32)op;

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
/* Read the CIM PIF logic-analyzer capture into @pif_req/@pif_rsp and
 * optionally report the hardware write pointers.  The LA is temporarily
 * disabled (LADBGEN toggled off) while reading and the original debug
 * configuration is restored before returning.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	val = t4_read_reg(adap, CIM_DEBUGSTS_A);
	req = POLADBGWRPTR_G(val);
	rsp = PILADBGWRPTR_G(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the read pointers, then latch one entry each
			 * from the PO (request) and PI (response) LAs.
			 */
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
				     PILADBGRDPTR_V(rsp));
			*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
			*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
			req++;
			rsp++;
		}
		req = (req + 2) & POLADBGRDPTR_M;
		rsp = (rsp + 2) & PILADBGRDPTR_M;
	}
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);	/* restore debug cfg */
}
/* Read the CIM MA logic-analyzer capture into @ma_req/@ma_rsp.  As with
 * the PIF LA, capture is temporarily disabled while reading and the
 * original debug configuration is restored afterwards.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
				     PILADBGRDPTR_V(idx));
			*ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
			*ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
		}
	}
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);	/* restore debug cfg */
}
/* Read the ULP-RX logic analyzer into @la_buf.  The capture consists of
 * 8 interleaved streams; entry j of stream i lands at la_buf[i + 8*j].
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, ULP_RX_LA_CTL_A, i);	/* select stream i */
		j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
		t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
	}
}
3867 #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
/**
 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 *	@caps16: a 16-bit Port Capabilities value
 *
 *	Returns the equivalent 32-bit Port Capabilities value.  Each 16-bit
 *	capability bit is mapped to its identically-named 32-bit counterpart.
 */
static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
{
	fw_port_cap32_t caps32 = 0;

	#define CAP16_TO_CAP32(__cap) \
		do { \
			if (caps16 & FW_PORT_CAP_##__cap) \
				caps32 |= FW_PORT_CAP32_##__cap; \
		} while (0)

	CAP16_TO_CAP32(SPEED_100M);
	CAP16_TO_CAP32(SPEED_1G);
	CAP16_TO_CAP32(SPEED_25G);
	CAP16_TO_CAP32(SPEED_10G);
	CAP16_TO_CAP32(SPEED_40G);
	CAP16_TO_CAP32(SPEED_100G);
	CAP16_TO_CAP32(FC_RX);
	CAP16_TO_CAP32(FC_TX);
	CAP16_TO_CAP32(ANEG);
	CAP16_TO_CAP32(MDIX);
	CAP16_TO_CAP32(MDIAUTO);
	CAP16_TO_CAP32(FEC_RS);
	CAP16_TO_CAP32(FEC_BASER_RS);
	CAP16_TO_CAP32(802_3_PAUSE);
	CAP16_TO_CAP32(802_3_ASM_DIR);

	#undef CAP16_TO_CAP32

	return caps32;
}
/**
 *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
 *	@caps32: a 32-bit Port Capabilities value
 *
 *	Returns the equivalent 16-bit Port Capabilities value.  Note that
 *	not all 32-bit Port Capabilities can be represented in the 16-bit
 *	Port Capabilities and some fields/values may not make it.
 */
static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
{
	fw_port_cap16_t caps16 = 0;

	#define CAP32_TO_CAP16(__cap) \
		do { \
			if (caps32 & FW_PORT_CAP32_##__cap) \
				caps16 |= FW_PORT_CAP_##__cap; \
		} while (0)

	CAP32_TO_CAP16(SPEED_100M);
	CAP32_TO_CAP16(SPEED_1G);
	CAP32_TO_CAP16(SPEED_10G);
	CAP32_TO_CAP16(SPEED_25G);
	CAP32_TO_CAP16(SPEED_40G);
	CAP32_TO_CAP16(SPEED_100G);
	CAP32_TO_CAP16(FC_RX);
	CAP32_TO_CAP16(FC_TX);
	CAP32_TO_CAP16(802_3_PAUSE);
	CAP32_TO_CAP16(802_3_ASM_DIR);
	CAP32_TO_CAP16(ANEG);
	CAP32_TO_CAP16(MDIX);
	CAP32_TO_CAP16(MDIAUTO);
	CAP32_TO_CAP16(FEC_RS);
	CAP32_TO_CAP16(FEC_BASER_RS);

	#undef CAP32_TO_CAP16

	return caps16;
}
/* Translate Firmware Port Capabilities Pause specification to Common Code */
static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
{
	enum cc_pause cc_pause = 0;

	if (fw_pause & FW_PORT_CAP32_FC_RX)
		cc_pause |= PAUSE_RX;
	if (fw_pause & FW_PORT_CAP32_FC_TX)
		cc_pause |= PAUSE_TX;

	return cc_pause;
}
/* Translate Common Code Pause specification into Firmware Port Capabilities */
static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
{
	fw_port_cap32_t fw_pause = 0;

	if (cc_pause & PAUSE_RX)
		fw_pause |= FW_PORT_CAP32_FC_RX;
	if (cc_pause & PAUSE_TX)
		fw_pause |= FW_PORT_CAP32_FC_TX;

	return fw_pause;
}
/* Translate Firmware Forward Error Correction specification to Common Code */
static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
{
	enum cc_fec cc_fec = 0;

	if (fw_fec & FW_PORT_CAP32_FEC_RS)
		cc_fec |= FEC_RS;
	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
		cc_fec |= FEC_BASER_RS;

	return cc_fec;
}
/* Translate Common Code Forward Error Correction specification to Firmware */
static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
{
	fw_port_cap32_t fw_fec = 0;

	if (cc_fec & FEC_RS)
		fw_fec |= FW_PORT_CAP32_FEC_RS;
	if (cc_fec & FEC_BASER_RS)
		fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;

	return fw_fec;
}
/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adapter: the adapter
 *	@mbox: the Firmware Mailbox to use
 *	@port: the Port ID
 *	@lc: the Port's Link Configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
		  unsigned int port, struct link_config *lc)
{
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
	fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;

	lc->link_ok = 0;

	/* Convert driver coding of Pause Frame Flow Control settings into the
	 * Firmware's API.
	 */
	fw_fc = cc_to_fwcap_pause(lc->requested_fc);

	/* Convert Common Code Forward Error Control settings into the
	 * Firmware's API.  If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
	 * sent us as part of it's IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
	if (lc->requested_fec & FEC_AUTO)
		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
	else
		cc_fec = lc->requested_fec;
	fw_fec = cc_to_fwcap_fec(cc_fec);

	/* Figure out what our Requested Port Capabilities are going to be.
	 */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
		/* PHY can't autonegotiate: advertise fixed capabilities */
		rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* Autoneg supported but disabled: pin the requested speed */
		rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else {
		rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
	}

	/* And send that on to the Firmware ...
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
				       FW_PORT_CMD_PORTID_V(port));
	cmd.action_to_len16 =
		cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
						 ? FW_PORT_ACTION_L1_CFG
						 : FW_PORT_ACTION_L1_CFG32) |
			    FW_LEN16(cmd));
	if (fw_caps == FW_CAPS16)
		cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	else
		cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
}
4073 * t4_restart_aneg - restart autonegotiation
4074 * @adap: the adapter
4075 * @mbox: mbox to use for the FW command
4076 * @port: the port id
4078 * Restarts autonegotiation for the selected port.
4080 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4082 struct fw_port_cmd c;
4084 memset(&c, 0, sizeof(c));
4085 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4086 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4087 FW_PORT_CMD_PORTID_V(port));
4089 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
/* Request only the autonegotiation capability bit; the firmware
 * re-runs link negotiation in response. */
4091 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
4092 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/* Platform-specific handler invoked when a matching interrupt bit fires. */
4095 typedef void (*int_handler_t)(struct adapter *adap);
/* One entry in a table consumed by t4_handle_intr_status(); a table is
 * terminated by an entry with mask == 0. */
4098 unsigned int mask; /* bits to check in interrupt status */
4099 const char *msg; /* message to print or NULL */
4100 short stat_idx; /* stat counter to increment or -1 */
4101 unsigned short fatal; /* whether the condition reported is fatal */
4102 int_handler_t int_handler; /* platform-specific int handler */
4106 * t4_handle_intr_status - table driven interrupt handler
4107 * @adapter: the adapter that generated the interrupt
4108 * @reg: the interrupt status register to process
4109 * @acts: table of interrupt actions
4111 * A table driven interrupt handler that applies a set of masks to an
4112 * interrupt status word and performs the corresponding actions if the
4113 * interrupts described by the mask have occurred. The actions include
4114 * optionally emitting a warning or alert message. The table is terminated
4115 * by an entry specifying mask 0. Returns the number of fatal interrupt
4118 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4119 const struct intr_info *acts)
4122 unsigned int mask = 0;
4123 unsigned int status = t4_read_reg(adapter, reg);
4125 for ( ; acts->mask; ++acts) {
4126 if (!(status & acts->mask))
/* Fatal conditions are always reported; non-fatal messages are
 * rate-limited to avoid flooding the log. */
4130 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4131 status & acts->mask);
4132 } else if (acts->msg && printk_ratelimit())
4133 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4134 status & acts->mask);
4135 if (acts->int_handler)
4136 acts->int_handler(adapter);
4140 if (status) /* clear processed interrupts */
4141 t4_write_reg(adapter, reg, status);
4146 * Interrupt handler for the PCIE module.
4148 static void pcie_intr_handler(struct adapter *adapter)
/* T4-only: PCIe core UTL system-bus agent status decode table. */
4150 static const struct intr_info sysbus_intr_info[] = {
4151 { RNPP_F, "RXNP array parity error", -1, 1 },
4152 { RPCP_F, "RXPC array parity error", -1, 1 },
4153 { RCIP_F, "RXCIF array parity error", -1, 1 },
4154 { RCCP_F, "Rx completions control array parity error", -1, 1 },
4155 { RFTP_F, "RXFT array parity error", -1, 1 },
/* T4-only: PCIe core UTL express-port status decode table. */
4158 static const struct intr_info pcie_port_intr_info[] = {
4159 { TPCP_F, "TXPC array parity error", -1, 1 },
4160 { TNPP_F, "TXNP array parity error", -1, 1 },
4161 { TFTP_F, "TXFT array parity error", -1, 1 },
4162 { TCAP_F, "TXCA array parity error", -1, 1 },
4163 { TCIP_F, "TXCIF array parity error", -1, 1 },
4164 { RCAP_F, "RXCA array parity error", -1, 1 },
4165 { OTDD_F, "outbound request TLP discarded", -1, 1 },
4166 { RDPE_F, "Rx data parity error", -1, 1 },
4167 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
/* T4 PCIE_INT_CAUSE decode table. */
4170 static const struct intr_info pcie_intr_info[] = {
4171 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4172 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4173 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4174 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4175 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4176 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4177 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4178 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4179 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4180 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4181 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4182 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4183 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4184 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4185 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4186 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4187 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4188 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4189 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4190 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4191 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4192 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4193 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4194 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4195 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4196 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4197 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4198 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
4199 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
4200 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
/* T5+ PCIE_INT_CAUSE decode table (bit layout differs from T4). */
4205 static struct intr_info t5_pcie_intr_info[] = {
4206 { MSTGRPPERR_F, "Master Response Read Queue parity error",
4208 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4209 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4210 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4211 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4212 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4213 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4214 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
4216 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
4218 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4219 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4220 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4221 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4222 { DREQWRPERR_F, "PCI DMA channel write request parity error",
4224 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4225 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4226 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4227 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4228 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4229 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4230 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4231 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4232 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4233 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4234 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
4236 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4238 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4239 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4240 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4241 { READRSPERR_F, "Outbound read error", -1, 0 },
/* T4 consults three registers (system bus, express port, INT_CAUSE);
 * T5+ only has the consolidated INT_CAUSE register. */
4247 if (is_t4(adapter->params.chip))
4248 fat = t4_handle_intr_status(adapter,
4249 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4251 t4_handle_intr_status(adapter,
4252 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4253 pcie_port_intr_info) +
4254 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4257 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4261 t4_fatal_err(adapter);
4265 * TP interrupt handler.
4267 static void tp_intr_handler(struct adapter *adapter)
4269 static const struct intr_info tp_intr_info[] = {
4270 { 0x3fffffff, "TP parity error", -1, 1 },
4271 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
/* Any reported TP condition is fatal; escalate to full adapter stop. */
4275 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
4276 t4_fatal_err(adapter);
4280 * SGE interrupt handler.
4282 static void sge_intr_handler(struct adapter *adapter)
/* Conditions common to all chip generations. */
4287 static const struct intr_info sge_intr_info[] = {
4288 { ERR_CPL_EXCEED_IQE_SIZE_F,
4289 "SGE received CPL exceeding IQE size", -1, 1 },
4290 { ERR_INVALID_CIDX_INC_F,
4291 "SGE GTS CIDX increment too large", -1, 0 },
4292 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4293 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
4294 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
4295 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4296 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
4298 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
4300 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
4302 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
4304 { ERR_ING_CTXT_PRIO_F,
4305 "SGE too many priority ingress contexts", -1, 0 },
4306 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4307 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
/* Conditions that exist only on T4/T5 hardware. */
4311 static struct intr_info t4t5_sge_intr_info[] = {
4312 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4313 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4314 { ERR_EGR_CTXT_PRIO_F,
4315 "SGE too many priority egress contexts", -1, 0 },
/* INT_CAUSE1/2 together form a 64-bit parity-error status word. */
4319 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
4320 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
4322 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
4323 (unsigned long long)v);
4324 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
4325 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
4328 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4329 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4330 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4331 t4t5_sge_intr_info);
4333 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4334 if (err & ERROR_QID_VALID_F) {
4335 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4337 if (err & UNCAPTURED_ERROR_F)
4338 dev_err(adapter->pdev_dev,
4339 "SGE UNCAPTURED_ERROR set (clearing)\n");
/* Write-one-to-clear the queue-error latch so new errors latch. */
4340 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4341 UNCAPTURED_ERROR_F);
4345 t4_fatal_err(adapter);
/* Aggregate masks for CIM outbound/inbound queue parity-error bits. */
4348 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
4349 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
4350 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
4351 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4354 * CIM interrupt handler.
4356 static void cim_intr_handler(struct adapter *adapter)
4358 static const struct intr_info cim_intr_info[] = {
4359 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
4360 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4361 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4362 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
4363 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
4364 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
4365 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
4366 { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
/* Decode table for micro-processor access violations. */
4369 static const struct intr_info cim_upintr_info[] = {
4370 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
4371 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
4372 { ILLWRINT_F, "CIM illegal write", -1, 1 },
4373 { ILLRDINT_F, "CIM illegal read", -1, 1 },
4374 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
4375 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
4376 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
4377 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
4378 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
4379 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
4380 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
4381 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
4382 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
4383 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
4384 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
4385 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
4386 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
4387 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
4388 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
4389 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
4390 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
4391 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
4392 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
4393 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
4394 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
4395 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
4396 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
4397 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
4404 fw_err = t4_read_reg(adapter, PCIE_FW_A);
4405 if (fw_err & PCIE_FW_ERR_F)
4406 t4_report_fw_error(adapter);
4408 /* When the Firmware detects an internal error which normally
4409 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
4410 * in order to make sure the Host sees the Firmware Crash. So
4411 * if we have a Timer0 interrupt and don't see a Firmware Crash,
4412 * ignore the Timer0 interrupt.
4415 val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
4416 if (val & TIMER0INT_F)
4417 if (!(fw_err & PCIE_FW_ERR_F) ||
4418 (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
4419 t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
4422 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
4424 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
4427 t4_fatal_err(adapter);
4431 * ULP RX interrupt handler.
4433 static void ulprx_intr_handler(struct adapter *adapter)
4435 static const struct intr_info ulprx_intr_info[] = {
4436 { 0x1800000, "ULPRX context error", -1, 1 },
4437 { 0x7fffff, "ULPRX parity error", -1, 1 },
/* Both decoded conditions are fatal. */
4441 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4442 t4_fatal_err(adapter);
4446 * ULP TX interrupt handler.
4448 static void ulptx_intr_handler(struct adapter *adapter)
4450 static const struct intr_info ulptx_intr_info[] = {
4451 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4453 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4455 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4457 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4459 { 0xfffffff, "ULPTX parity error", -1, 1 },
/* Any decoded fatal condition stops the adapter. */
4463 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4464 t4_fatal_err(adapter);
4468 * PM TX interrupt handler.
4470 static void pmtx_intr_handler(struct adapter *adapter)
4472 static const struct intr_info pmtx_intr_info[] = {
4473 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4474 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4475 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4476 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4477 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4478 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4479 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4481 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4482 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
/* All PM TX conditions are marked fatal in the table above. */
4486 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4487 t4_fatal_err(adapter);
4491 * PM RX interrupt handler.
4493 static void pmrx_intr_handler(struct adapter *adapter)
4495 static const struct intr_info pmrx_intr_info[] = {
4496 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4497 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4498 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4499 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4501 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4502 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
/* All PM RX conditions are marked fatal in the table above. */
4506 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4507 t4_fatal_err(adapter);
4511 * CPL switch interrupt handler.
4513 static void cplsw_intr_handler(struct adapter *adapter)
4515 static const struct intr_info cplsw_intr_info[] = {
4516 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4517 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4518 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4519 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4520 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4521 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
/* All CPL switch conditions are fatal. */
4525 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4526 t4_fatal_err(adapter);
4530 * LE interrupt handler.
4532 static void le_intr_handler(struct adapter *adap)
4534 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
/* T4/T5 Lookup Engine cause decode. */
4535 static const struct intr_info le_intr_info[] = {
4536 { LIPMISS_F, "LE LIP miss", -1, 0 },
4537 { LIP0_F, "LE 0 LIP error", -1, 0 },
4538 { PARITYERR_F, "LE parity error", -1, 1 },
4539 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4540 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
/* T6 uses different cause-bit positions for the same conditions. */
4544 static struct intr_info t6_le_intr_info[] = {
4545 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4546 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4547 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4548 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4549 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4553 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4554 (chip <= CHELSIO_T5) ?
4555 le_intr_info : t6_le_intr_info))
4560 * MPS interrupt handler.
4562 static void mps_intr_handler(struct adapter *adapter)
4564 static const struct intr_info mps_rx_intr_info[] = {
4565 { 0xffffff, "MPS Rx parity error", -1, 1 },
4568 static const struct intr_info mps_tx_intr_info[] = {
4569 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4570 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4571 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4573 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4575 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4576 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4577 { FRMERR_F, "MPS Tx framing error", -1, 1 },
/* T6 variant omits the BUBBLE condition (see comment in-table). */
4580 static const struct intr_info t6_mps_tx_intr_info[] = {
4581 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4582 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4583 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4585 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4587 /* MPS Tx Bubble is normal for T6 */
4588 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4589 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4592 static const struct intr_info mps_trc_intr_info[] = {
4593 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4594 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4596 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4599 static const struct intr_info mps_stat_sram_intr_info[] = {
4600 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4603 static const struct intr_info mps_stat_tx_intr_info[] = {
4604 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4607 static const struct intr_info mps_stat_rx_intr_info[] = {
4608 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4611 static const struct intr_info mps_cls_intr_info[] = {
4612 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4613 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4614 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
/* Sum fatal counts across all seven MPS sub-module cause registers. */
4620 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
4622 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
4623 is_t6(adapter->params.chip)
4624 ? t6_mps_tx_intr_info
4625 : mps_tx_intr_info) +
4626 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
4627 mps_trc_intr_info) +
4628 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
4629 mps_stat_sram_intr_info) +
4630 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
4631 mps_stat_tx_intr_info) +
4632 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
4633 mps_stat_rx_intr_info) +
4634 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
4637 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4638 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
4640 t4_fatal_err(adapter);
/* Bits of interest in the EDC/MC interrupt-cause registers. */
4643 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4647 * EDC/MC interrupt handler.
/* @idx selects the memory controller: MEM_EDC0/MEM_EDC1/MEM_MC(0)/MEM_MC1. */
4649 static void mem_intr_handler(struct adapter *adapter, int idx)
4651 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4653 unsigned int addr, cnt_addr, v;
/* Pick the cause and ECC-status register addresses for the selected
 * memory; the MC registers moved between T4 and later chips. */
4655 if (idx <= MEM_EDC1) {
4656 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4657 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4658 } else if (idx == MEM_MC) {
4659 if (is_t4(adapter->params.chip)) {
4660 addr = MC_INT_CAUSE_A;
4661 cnt_addr = MC_ECC_STATUS_A;
4663 addr = MC_P_INT_CAUSE_A;
4664 cnt_addr = MC_P_ECC_STATUS_A;
4667 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4668 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4671 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4672 if (v & PERR_INT_CAUSE_F)
4673 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4675 if (v & ECC_CE_INT_CAUSE_F) {
4676 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
4678 t4_edc_err_read(adapter, idx);
/* Reset the correctable-error counter after reading it. */
4680 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4681 if (printk_ratelimit())
4682 dev_warn(adapter->pdev_dev,
4683 "%u %s correctable ECC data error%s\n",
4684 cnt, name[idx], cnt > 1 ? "s" : "");
4686 if (v & ECC_UE_INT_CAUSE_F)
4687 dev_alert(adapter->pdev_dev,
4688 "%s uncorrectable ECC data error\n", name[idx]);
4690 t4_write_reg(adapter, addr, v);
/* Parity and uncorrectable-ECC errors are unrecoverable. */
4691 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4692 t4_fatal_err(adapter);
4696 * MA interrupt handler.
4698 static void ma_intr_handler(struct adapter *adap)
4700 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4702 if (status & MEM_PERR_INT_CAUSE_F) {
4703 dev_alert(adap->pdev_dev,
4704 "MA parity error, parity status %#x\n",
4705 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
/* T5 has a second parity-status register to report as well. */
4706 if (is_t5(adap->params.chip))
4707 dev_alert(adap->pdev_dev,
4708 "MA parity error, parity status %#x\n",
4710 MA_PARITY_ERROR_STATUS2_A));
4712 if (status & MEM_WRAP_INT_CAUSE_F) {
4713 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4714 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4715 "client %u to address %#x\n",
4716 MEM_WRAP_CLIENT_NUM_G(v),
4717 MEM_WRAP_ADDRESS_G(v) << 4);
4719 t4_write_reg(adap, MA_INT_CAUSE_A, status);
4724 * SMB interrupt handler.
4726 static void smb_intr_handler(struct adapter *adap)
4728 static const struct intr_info smb_intr_info[] = {
4729 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4730 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4731 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4735 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4740 * NC-SI interrupt handler.
4742 static void ncsi_intr_handler(struct adapter *adap)
4744 static const struct intr_info ncsi_intr_info[] = {
4745 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4746 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4747 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4748 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4752 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4757 * XGMAC interrupt handler.
4759 static void xgmac_intr_handler(struct adapter *adap, int port)
4761 u32 v, int_cause_reg;
/* The per-port MAC cause register moved between T4 and T5+. */
4763 if (is_t4(adap->params.chip))
4764 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4766 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4768 v = t4_read_reg(adap, int_cause_reg);
/* Only the Tx/Rx FIFO parity bits are handled here. */
4770 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4774 if (v & TXFIFO_PRTY_ERR_F)
4775 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4777 if (v & RXFIFO_PRTY_ERR_F)
4778 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4780 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4785 * PL interrupt handler.
4787 static void pl_intr_handler(struct adapter *adap)
4789 static const struct intr_info pl_intr_info[] = {
4790 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4791 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4795 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
/* Per-PF and global top-level interrupt enable masks used by
 * t4_intr_enable()/t4_slow_intr_handler() below. */
4799 #define PF_INTR_MASK (PFSW_F)
4800 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
4801 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
4802 CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
4805 * t4_slow_intr_handler - control path interrupt handler
4806 * @adapter: the adapter
4808 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4809 * The designation 'slow' is because it involves register reads, while
4810 * data interrupts typically don't involve any MMIOs.
4812 int t4_slow_intr_handler(struct adapter *adapter)
4814 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A)
4816 if (!(cause & GLBL_INTR_MASK))
/* Dispatch to each module handler whose cause bit is set. */
4819 cim_intr_handler(adapter);
4821 mps_intr_handler(adapter);
4823 ncsi_intr_handler(adapter);
4825 pl_intr_handler(adapter);
4827 smb_intr_handler(adapter);
4828 if (cause & XGMAC0_F)
4829 xgmac_intr_handler(adapter, 0);
4830 if (cause & XGMAC1_F)
4831 xgmac_intr_handler(adapter, 1);
4832 if (cause & XGMAC_KR0_F)
4833 xgmac_intr_handler(adapter, 2);
4834 if (cause & XGMAC_KR1_F)
4835 xgmac_intr_handler(adapter, 3);
4837 pcie_intr_handler(adapter);
4839 mem_intr_handler(adapter, MEM_MC);
/* A second memory controller (MC1) only exists on T5. */
4840 if (is_t5(adapter->params.chip) && (cause & MC1_F))
4841 mem_intr_handler(adapter, MEM_MC1);
4843 mem_intr_handler(adapter, MEM_EDC0);
4845 mem_intr_handler(adapter, MEM_EDC1);
4847 le_intr_handler(adapter);
4849 tp_intr_handler(adapter);
4851 ma_intr_handler(adapter);
4852 if (cause & PM_TX_F)
4853 pmtx_intr_handler(adapter);
4854 if (cause & PM_RX_F)
4855 pmrx_intr_handler(adapter);
4856 if (cause & ULP_RX_F)
4857 ulprx_intr_handler(adapter);
4858 if (cause & CPL_SWITCH_F)
4859 cplsw_intr_handler(adapter);
4861 sge_intr_handler(adapter);
4862 if (cause & ULP_TX_F)
4863 ulptx_intr_handler(adapter);
4865 /* Clear the interrupts just processed for which we are the master. */
4866 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
4867 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
4872 * t4_intr_enable - enable interrupts
4873 * @adapter: the adapter whose interrupts should be enabled
4875 * Enable PF-specific interrupts for the calling function and the top-level
4876 * interrupt concentrator for global interrupts. Interrupts are already
4877 * enabled at each module, here we just enable the roots of the interrupt
4880 * Note: this function should be called only when the driver manages
4881 * non PF-specific interrupts from the various HW modules. Only one PCI
4882 * function at a time should be doing this.
4884 void t4_intr_enable(struct adapter *adapter)
4887 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
/* The PF-number field in WHOAMI moved/widened on T6. */
4888 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4889 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
/* These extra SGE conditions only exist on T4/T5 silicon. */
4891 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4892 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
4893 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
4894 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
4895 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
4896 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
4897 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
4898 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
4899 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
4900 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
/* Route global (non-PF) interrupts to this PF. */
4901 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
4905 * t4_intr_disable - disable interrupts
4906 * @adapter: the adapter whose interrupts should be disabled
4908 * Disable interrupts. We only disable the top-level interrupt
4909 * concentrators. The caller must be a PCI function managing global
4912 void t4_intr_disable(struct adapter *adapter)
/* Skip register access entirely if the PCI channel is offline
 * (e.g. during EEH/AER error recovery). */
4916 if (pci_channel_offline(adapter->pdev))
4919 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4920 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4921 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4923 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
/* Stop routing global interrupts to this PF. */
4924 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
4928 * t4_config_rss_range - configure a portion of the RSS mapping table
4929 * @adapter: the adapter
4930 * @mbox: mbox to use for the FW command
4931 * @viid: virtual interface whose RSS subtable is to be written
4932 * @start: start entry in the table to write
4933 * @n: how many table entries to write
4934 * @rspq: values for the response queue lookup table
4935 * @nrspq: number of values in @rspq
4937 * Programs the selected part of the VI's RSS mapping table with the
4938 * provided values. If @nrspq < @n the supplied values are used repeatedly
4939 * until the full table range is populated.
4941 * The caller must ensure the values in @rspq are in the range allowed for
4944 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4945 int start, int n, const u16 *rspq, unsigned int nrspq)
4948 const u16 *rsp = rspq;
4949 const u16 *rsp_end = rspq + nrspq;
4950 struct fw_rss_ind_tbl_cmd cmd;
4952 memset(&cmd, 0, sizeof(cmd));
4953 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
4954 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4955 FW_RSS_IND_TBL_CMD_VIID_V(viid));
4956 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4958 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
4960 int nq = min(n, 32);
4961 __be32 *qp = &cmd.iq0_to_iq2;
4963 cmd.niqid = cpu_to_be16(nq);
4964 cmd.startidx = cpu_to_be16(start);
/* Pack three queue IDs per 32-bit word, wrapping back to the
 * start of @rspq whenever the supplied values run out. */
4972 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
4973 if (++rsp >= rsp_end)
4975 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
4976 if (++rsp >= rsp_end)
4978 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
4979 if (++rsp >= rsp_end)
4982 *qp++ = cpu_to_be32(v);
4986 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4994 * t4_config_glbl_rss - configure the global RSS mode
4995 * @adapter: the adapter
4996 * @mbox: mbox to use for the FW command
4997 * @mode: global RSS mode
4998 * @flags: mode-specific flags
5000 * Sets the global RSS mode.
5002 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5005 struct fw_rss_glb_config_cmd c;
5007 memset(&c, 0, sizeof(c));
5008 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5009 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5010 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* The command union's layout depends on the requested mode. */
5011 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5012 c.u.manual.mode_pkd =
5013 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5014 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5015 c.u.basicvirtual.mode_pkd =
5016 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5017 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5020 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5024 * t4_config_vi_rss - configure per VI RSS settings
5025 * @adapter: the adapter
5026 * @mbox: mbox to use for the FW command
5029 * @defq: id of the default RSS queue for the VI.
5031 * Configures VI-specific RSS properties.
5033 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5034 unsigned int flags, unsigned int defq)
5036 struct fw_rss_vi_config_cmd c;
5038 memset(&c, 0, sizeof(c));
5039 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5040 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5041 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5042 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* @flags are OR'd directly into the defaultq_to_udpen word alongside
 * the default-queue id, so callers pass pre-shifted field values.
 */
5043 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5044 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5045 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5048 /* Read an RSS table row */
5049 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
/* Select the row (upper bits 0xfff00000 appear to be the row-select
 * trigger), then poll the same register for LKPTBLROWVLD.  The poll's
 * trailing arguments (attempts/delay/&val) are not visible here.
 */
5051 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
5052 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5057 * t4_read_rss - read the contents of the RSS mapping table
5058 * @adapter: the adapter
5059 * @map: holds the contents of the RSS mapping table
5061 * Reads the contents of the RSS hash->queue mapping table.
5063 int t4_read_rss(struct adapter *adapter, u16 *map)
/* Each HW row holds two queue entries, hence RSS_NENTRIES / 2 reads
 * with two map[] entries extracted per row.
 */
5068 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
5069 ret = rd_rss_row(adapter, i, &val);
5072 *map++ = LKPTBLQUEUE0_G(val);
5073 *map++ = LKPTBLQUEUE1_G(val);
/* Prefer firmware LDST access when the firmware is healthy (FW_OK) or
 * when backdoor register access was not explicitly requested (use_bd).
 */
5078 static unsigned int t4_use_ldst(struct adapter *adap)
5080 return (adap->flags & FW_OK) || !adap->use_bd;
5084 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5085 * @adap: the adapter
5086 * @cmd: TP fw ldst address space type
5087 * @vals: where the indirect register values are stored/written
5088 * @nregs: how many indirect registers to read/write
5089 * @start_idx: index of first indirect register to read/write
5090 * @rw: Read (1) or Write (0)
5091 * @sleep_ok: if true we may sleep while awaiting command completion
5093 * Access TP indirect registers through LDST
5095 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5096 unsigned int nregs, unsigned int start_index,
5097 unsigned int rw, bool sleep_ok)
5101 struct fw_ldst_cmd c;
/* One FW_LDST_CMD mailbox exchange per register. */
5103 for (i = 0; i < nregs; i++) {
5104 memset(&c, 0, sizeof(c));
5105 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5107 (rw ? FW_CMD_READ_F :
5109 FW_LDST_CMD_ADDRSPACE_V(cmd));
5110 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5112 c.u.addrval.addr = cpu_to_be32(start_index + i);
/* For a write, carry the value out; for a read, send zero. */
5113 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5114 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
/* On a read, the firmware returns the value in the same command struct. */
5120 vals[i] = be32_to_cpu(c.u.addrval.val);
5126 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5127 * @adap: the adapter
5128 * @reg_addr: Address Register
5129 * @reg_data: Data register
5130 * @buff: where the indirect register values are stored/written
5131 * @nregs: how many indirect registers to read/write
5132 * @start_index: index of first indirect register to read/write
5133 * @rw: READ(1) or WRITE(0)
5134 * @sleep_ok: if true we may sleep while awaiting command completion
5136 * Read/Write TP indirect registers through LDST if possible.
5137 * Else, use backdoor access
5139 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5140 u32 *buff, u32 nregs, u32 start_index, int rw,
/* Map the address register to the matching LDST address-space type.
 * The switch head and TP_PIO_ADDR_A case label are not visible in this
 * extract; unknown address registers fall through to backdoor access.
 */
5148 cmd = FW_LDST_ADDRSPC_TP_PIO;
5150 case TP_TM_PIO_ADDR_A:
5151 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5153 case TP_MIB_INDEX_A:
5154 cmd = FW_LDST_ADDRSPC_TP_MIB;
5157 goto indirect_access;
/* Try the firmware path first when usable. */
5160 if (t4_use_ldst(adap))
5161 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
/* Backdoor fallback: direct address/data indirect register access. */
5168 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5171 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5177 * t4_tp_pio_read - Read TP PIO registers
5178 * @adap: the adapter
5179 * @buff: where the indirect register values are written
5180 * @nregs: how many indirect registers to read
5181 * @start_index: index of first indirect register to read
5182 * @sleep_ok: if true we may sleep while awaiting command completion
5184 * Read TP PIO Registers
5186 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5187 u32 start_index, bool sleep_ok)
/* rw = 1 selects a read in t4_tp_indirect_rw(). */
5189 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5190 start_index, 1, sleep_ok);
5194 * t4_tp_pio_write - Write TP PIO registers
5195 * @adap: the adapter
5196 * @buff: where the indirect register values are stored
5197 * @nregs: how many indirect registers to write
5198 * @start_index: index of first indirect register to write
5199 * @sleep_ok: if true we may sleep while awaiting command completion
5201 * Write TP PIO Registers
5203 static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5204 u32 start_index, bool sleep_ok)
/* rw = 0 selects a write in t4_tp_indirect_rw(). */
5206 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5207 start_index, 0, sleep_ok);
5211 * t4_tp_tm_pio_read - Read TP TM PIO registers
5212 * @adap: the adapter
5213 * @buff: where the indirect register values are written
5214 * @nregs: how many indirect registers to read
5215 * @start_index: index of first indirect register to read
5216 * @sleep_ok: if true we may sleep while awaiting command completion
5218 * Read TP TM PIO Registers
5220 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5221 u32 start_index, bool sleep_ok)
/* Same helper as t4_tp_pio_read() but through the TM PIO address pair. */
5223 t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5224 nregs, start_index, 1, sleep_ok);
5228 * t4_tp_mib_read - Read TP MIB registers
5229 * @adap: the adapter
5230 * @buff: where the indirect register values are written
5231 * @nregs: how many indirect registers to read
5232 * @start_index: index of first indirect register to read
5233 * @sleep_ok: if true we may sleep while awaiting command completion
5235 * Read TP MIB Registers
5237 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
/* rw = 1: read @nregs MIB counters through the MIB index/data pair. */
5240 t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5241 start_index, 1, sleep_ok);
5245 * t4_read_rss_key - read the global RSS key
5246 * @adap: the adapter
5247 * @key: 10-entry array holding the 320-bit RSS key
5248 * @sleep_ok: if true we may sleep while awaiting command completion
5250 * Reads the global 320-bit RSS key.
5252 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
/* 10 x 32-bit reads starting at SECRET_KEY0 = the full 320-bit key. */
5254 t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5258 * t4_write_rss_key - program one of the RSS keys
5259 * @adap: the adapter
5260 * @key: 10-entry array holding the 320-bit RSS key
5261 * @idx: which RSS key to write
5262 * @sleep_ok: if true we may sleep while awaiting command completion
5264 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5265 * 0..15 the corresponding entry in the RSS key table is written,
5266 * otherwise the global RSS key is written.
5268 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5271 u8 rss_key_addr_cnt = 16;
5272 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5274 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5275 * allows access to key addresses 16-63 by using KeyWrAddrX
5276 * as index[5:4](upper 2) into key table
5278 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5279 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5280 rss_key_addr_cnt = 32;
/* Stage the 320-bit key in the secret-key PIO registers first. */
5282 t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
/* Then latch it into table slot @idx (extended addressing on T6+ in
 * KeyMode 3); negative or out-of-range @idx leaves only the staged
 * global key written.
 */
5284 if (idx >= 0 && idx < rss_key_addr_cnt) {
5285 if (rss_key_addr_cnt > 16)
5286 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5287 KEYWRADDRX_V(idx >> 4) |
5288 T6_VFWRADDR_V(idx) | KEYWREN_F);
5290 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5291 KEYWRADDR_V(idx) | KEYWREN_F);
5296 * t4_read_rss_pf_config - read PF RSS Configuration Table
5297 * @adapter: the adapter
5298 * @index: the entry in the PF RSS table to read
5299 * @valp: where to store the returned value
5300 * @sleep_ok: if true we may sleep while awaiting command completion
5302 * Reads the PF RSS Configuration Table at the specified index and returns
5303 * the value found there.
5305 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5306 u32 *valp, bool sleep_ok)
/* PF config entries are laid out consecutively from TP_RSS_PF0_CONFIG. */
5308 t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
5312 * t4_read_rss_vf_config - read VF RSS Configuration Table
5313 * @adapter: the adapter
5314 * @index: the entry in the VF RSS table to read
5315 * @vfl: where to store the returned VFL
5316 * @vfh: where to store the returned VFH
5317 * @sleep_ok: if true we may sleep while awaiting command completion
5319 * Reads the VF RSS Configuration Table at the specified index and returns
5320 * the (VFL, VFH) values found there.
5322 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5323 u32 *vfl, u32 *vfh, bool sleep_ok)
5325 u32 vrt, mask, data;
/* The VF write-address field moved/widened after T5, so pick the
 * chip-appropriate mask and index encoding.
 */
5327 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5328 mask = VFWRADDR_V(VFWRADDR_M);
5329 data = VFWRADDR_V(index);
5331 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
5332 data = T6_VFWRADDR_V(index);
5335 /* Request that the index'th VF Table values be read into VFL/VFH.
5337 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5338 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5339 vrt |= data | VFRDEN_F;
5340 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5342 /* Grab the VFL/VFH values ...
5344 t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5345 t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5349 * t4_read_rss_pf_map - read PF RSS Map
5350 * @adapter: the adapter
5351 * @sleep_ok: if true we may sleep while awaiting command completion
5353 * Reads the PF RSS Map register and returns its value.
5355 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
/* Single-register read; pfmap declaration/return are outside this extract. */
5359 t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5364 * t4_read_rss_pf_mask - read PF RSS Mask
5365 * @adapter: the adapter
5366 * @sleep_ok: if true we may sleep while awaiting command completion
5368 * Reads the PF RSS Mask register and returns its value.
5370 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
/* Single-register read; pfmask declaration/return are outside this extract. */
5374 t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5379 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5380 * @adap: the adapter
5381 * @v4: holds the TCP/IP counter values
5382 * @v6: holds the TCP/IPv6 counter values
5383 * @sleep_ok: if true we may sleep while awaiting command completion
5385 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5386 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5388 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5389 struct tp_tcp_stats *v6, bool sleep_ok)
/* val[] spans the whole OUT_RST..RXT_SEG_LO MIB window so one bulk
 * read serves all four statistics.
 */
5391 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
5393 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
5394 #define STAT(x) val[STAT_IDX(x)]
/* 64-bit counters are split across _HI/_LO MIB registers. */
5395 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5398 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5399 TP_MIB_TCP_OUT_RST_A, sleep_ok);
5400 v4->tcp_out_rsts = STAT(OUT_RST);
5401 v4->tcp_in_segs = STAT64(IN_SEG);
5402 v4->tcp_out_segs = STAT64(OUT_SEG);
5403 v4->tcp_retrans_segs = STAT64(RXT_SEG);
/* Second bulk read covers the IPv6 window, which mirrors the IPv4
 * layout starting at V6OUT_RST.  (The #undefs for the helper macros
 * are not visible in this extract.)
 */
5406 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5407 TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
5408 v6->tcp_out_rsts = STAT(OUT_RST);
5409 v6->tcp_in_segs = STAT64(IN_SEG);
5410 v6->tcp_out_segs = STAT64(OUT_SEG);
5411 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5419 * t4_tp_get_err_stats - read TP's error MIB counters
5420 * @adap: the adapter
5421 * @st: holds the counter values
5422 * @sleep_ok: if true we may sleep while awaiting command completion
5424 * Returns the values of TP's error counters.
5426 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
/* Per-channel counter blocks: read nchan consecutive MIB registers
 * starting at each family's channel-0 index.
 */
5429 int nchan = adap->params.arch.nchan;
5431 t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5433 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5435 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5437 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5438 TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5439 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5440 TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5441 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5443 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5444 TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5445 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5446 TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
/* ofld_no_neigh and the register that follows it are read as a pair. */
5447 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5452 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5453 * @adap: the adapter
5454 * @st: holds the counter values
5455 * @sleep_ok: if true we may sleep while awaiting command completion
5457 * Returns the values of TP's CPL counters.
5459 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
/* One nchan-wide read each for inbound requests and outbound responses. */
5462 int nchan = adap->params.arch.nchan;
5464 t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5466 t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5470 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5471 * @adap: the adapter
5472 * @st: holds the counter values
5473 * @sleep_ok: if true we may sleep while awaiting command completion
5475 * Returns the values of TP's RDMA counters.
5477 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
/* rqe_dfr_pkt and the adjacent counter are read as a pair (count 2). */
5480 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5485 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5486 * @adap: the adapter
5487 * @idx: the port index
5488 * @st: holds the counter values
5489 * @sleep_ok: if true we may sleep while awaiting command completion
5491 * Returns the values of TP's FCoE counters for the selected port.
5493 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5494 struct tp_fcoe_stats *st, bool sleep_ok)
/* Per-port counters: DDP'ed frames, dropped frames, and a HI/LO byte
 * counter pair (hence the 2 * idx stride for the byte registers).
 */
5498 t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5501 t4_tp_mib_read(adap, &st->frames_drop, 1,
5502 TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5504 t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
/* val[0] = HI word, val[1] = LO word of the 64-bit byte count. */
5507 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5511 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5512 * @adap: the adapter
5513 * @st: holds the counter values
5514 * @sleep_ok: if true we may sleep while awaiting command completion
5516 * Returns the values of TP's counters for non-TCP directly-placed packets.
5518 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
/* Four consecutive MIB registers starting at USM_PKTS: frames,
 * then a HI/LO pair forming the 64-bit octet count.
 */
5523 t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5524 st->frames = val[0];
5526 st->octets = ((u64)val[2] << 32) | val[3];
5530 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5531 * @adap: the adapter
5532 * @mtus: where to store the MTU values
5533 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5535 * Reads the HW path MTU table.
5537 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5542 for (i = 0; i < NMTUS; ++i) {
/* MTUINDEX_V(0xff) selects read-back mode; the entry index rides in
 * the MTUVALUE field and the result is read from the same register.
 */
5543 t4_write_reg(adap, TP_MTU_TABLE_A,
5544 MTUINDEX_V(0xff) | MTUVALUE_V(i));
5545 v = t4_read_reg(adap, TP_MTU_TABLE_A);
5546 mtus[i] = MTUVALUE_G(v);
5548 mtu_log[i] = MTUWIDTH_G(v);
5553 * t4_read_cong_tbl - reads the congestion control table
5554 * @adap: the adapter
5555 * @incr: where to store the alpha values
5557 * Reads the additive increments programmed into the HW congestion
5560 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5562 unsigned int mtu, w;
5564 for (mtu = 0; mtu < NMTUS; ++mtu)
5565 for (w = 0; w < NCCTRL_WIN; ++w) {
/* ROWINDEX_V(0xffff) selects read-back; the row address is
 * (mtu << 5) | w, and the low 13 bits hold the increment.
 */
5566 t4_write_reg(adap, TP_CCTRL_TABLE_A,
5567 ROWINDEX_V(0xffff) | (mtu << 5) | w);
5568 incr[mtu][w] = (u16)t4_read_reg(adap,
5569 TP_CCTRL_TABLE_A) & 0x1fff;
5574 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5575 * @adap: the adapter
5576 * @addr: the indirect TP register address
5577 * @mask: specifies the field within the register to modify
5578 * @val: new value for the field
5580 * Sets a field of an indirect TP register to the given value.
5582 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5583 unsigned int mask, unsigned int val)
/* Read-modify-write through the PIO addr/data pair: keep the bits
 * outside @mask, overlay @val.  Not atomic against other writers.
 */
5585 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5586 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5587 t4_write_reg(adap, TP_PIO_DATA_A, val);
5591 * init_cong_ctrl - initialize congestion control parameters
5592 * @a: the alpha values for congestion control
5593 * @b: the beta values for congestion control
5595 * Initialize the congestion control parameters.
5597 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* Fixed lookup tables; the assignments for intermediate indices
 * (a[9..], b[9..12], b[28..]) are not visible in this extract.
 */
5599 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5624 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5627 b[13] = b[14] = b[15] = b[16] = 3;
5628 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5629 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5634 /* The minimum additive increment value for the congestion control table */
5635 #define CC_MIN_INCR 2U
5638 * t4_load_mtus - write the MTU and congestion control HW tables
5639 * @adap: the adapter
5640 * @mtus: the values for the MTU table
5641 * @alpha: the values for the congestion control alpha parameter
5642 * @beta: the values for the congestion control beta parameter
5644 * Write the HW MTU table with the supplied MTUs and the high-speed
5645 * congestion control table with the supplied alpha, beta, and MTUs.
5646 * We write the two tables together because the additive increments
5647 * depend on the MTUs.
5649 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5650 const unsigned short *alpha, const unsigned short *beta)
5652 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5653 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5654 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5655 28672, 40960, 57344, 81920, 114688, 163840, 229376
5660 for (i = 0; i < NMTUS; ++i) {
5661 unsigned int mtu = mtus[i];
/* fls() gives the MTU's bit width; the test below rounds the
 * stored log2 up when the MTU is closer to the next power of two.
 * (The log2 adjustment line itself is not visible in this extract.)
 */
5662 unsigned int log2 = fls(mtu);
5664 if (!(mtu & ((1 << log2) >> 2))) /* round */
5666 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5667 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
/* For each congestion window, derive the additive increment from
 * the MTU payload (mtu - 40 bytes of headers), alpha, and the
 * window's average packet count, floored at CC_MIN_INCR.
 */
5669 for (w = 0; w < NCCTRL_WIN; ++w) {
5672 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5675 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5676 (w << 16) | (beta[w] << 13) | inc);
5681 /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5682 * clocks. The formula is
5684 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5686 * which is equivalent to
5688 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5690 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5692 u64 v = bytes256 * adap->params.vpd.cclk;
/* v * 62 + v / 2 == 62.5 * v, computed in integer arithmetic. */
5694 return v * 62 + v / 2;
5698 * t4_get_chan_txrate - get the current per channel Tx rates
5699 * @adap: the adapter
5700 * @nic_rate: rates for NIC traffic
5701 * @ofld_rate: rates for offloaded traffic
5703 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5706 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
/* Tunnel (NIC) rates: channels 2/3 exist only on full-NCHAN parts. */
5710 v = t4_read_reg(adap, TP_TX_TRATE_A);
5711 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5712 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5713 if (adap->params.arch.nchan == NCHAN) {
5714 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5715 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
/* Offload rates mirror the same per-channel layout. */
5718 v = t4_read_reg(adap, TP_TX_ORATE_A);
5719 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5720 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5721 if (adap->params.arch.nchan == NCHAN) {
5722 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5723 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5728 * t4_set_trace_filter - configure one of the tracing filters
5729 * @adap: the adapter
5730 * @tp: the desired trace filter parameters
5731 * @idx: which filter to configure
5732 * @enable: whether to enable or disable the filter
5734 * Configures one of the tracing filters available in HW. If @enable is
5735 * %0 @tp is not examined and may be %NULL. The user is responsible to
5736 * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
5738 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5739 int idx, int enable)
5741 int i, ofst = idx * 4;
5742 u32 data_reg, mask_reg, cfg;
5743 u32 multitrc = TRCMULTIFILTER_F;
/* Disable path: clearing MATCH_CTL_A turns the filter off. */
5746 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5750 cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5751 if (cfg & TRCMULTIFILTER_F) {
5752 /* If multiple tracers are enabled, then maximum
5753 * capture size is 2.5KB (FIFO size of a single channel)
5754 * minus 2 flits for CPL_TRACE_PKT header.
5756 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5759 /* If multiple tracers are disabled, to avoid deadlocks
5760 * maximum packet capture size of 9600 bytes is recommended.
5761 * Also in this mode, only trace0 can be enabled and running.
5764 if (tp->snap_len > 9600 || idx)
/* Validate the remaining parameters against field widths; the port
 * range differs between T4 (0-11) and later chips (0-19).
 */
5768 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5769 tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5770 tp->min_len > TFMINPKTSIZE_M)
5773 /* stop the tracer we'll be changing */
5774 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
/* Program the per-filter match/don't-care register banks.  The mask
 * is written inverted: HW stores "don't care" bits.
 */
5776 idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5777 data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5778 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
5780 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5781 t4_write_reg(adap, data_reg, tp->data[i]);
5782 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5784 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5785 TFCAPTUREMAX_V(tp->snap_len) |
5786 TFMINPKTSIZE_V(tp->min_len));
/* Finally (re)enable the filter; the enable/port/invert fields moved
 * between T4 and T5+, hence the chip-dependent encoding.
 */
5787 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5788 TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5789 (is_t4(adap->params.chip) ?
5790 TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
5791 T5_TFPORT_V(tp->port) | T5_TFEN_F |
5792 T5_TFINVERTMATCH_V(tp->invert)));
5798 * t4_get_trace_filter - query one of the tracing filters
5799 * @adap: the adapter
5800 * @tp: the current trace filter parameters
5801 * @idx: which trace filter to query
5802 * @enabled: non-zero if the filter is enabled
5804 * Returns the current settings of one of the HW tracing filters.
5806 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5810 int i, ofst = idx * 4;
5811 u32 data_reg, mask_reg;
5813 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
5814 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
/* The enable/port/invert fields are encoded differently on T4 vs T5+,
 * mirroring the encoding used in t4_set_trace_filter().
 */
5816 if (is_t4(adap->params.chip)) {
5817 *enabled = !!(ctla & TFEN_F);
5818 tp->port = TFPORT_G(ctla);
5819 tp->invert = !!(ctla & TFINVERTMATCH_F);
5821 *enabled = !!(ctla & T5_TFEN_F);
5822 tp->port = T5_TFPORT_G(ctla);
5823 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
5825 tp->snap_len = TFCAPTUREMAX_G(ctlb);
5826 tp->min_len = TFMINPKTSIZE_G(ctlb);
5827 tp->skip_ofst = TFOFFSET_G(ctla);
5828 tp->skip_len = TFLENGTH_G(ctla);
/* Read back match data; HW stores the mask inverted ("don't care"),
 * so it is re-inverted here and applied to the data words.
 */
5830 ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
5831 data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
5832 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
5834 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5835 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5836 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5841 * t4_pmtx_get_stats - returns the HW stats from PMTX
5842 * @adap: the adapter
5843 * @cnt: where to store the count statistics
5844 * @cycles: where to store the cycle statistics
5846 * Returns performance statistics from PMTX.
5848 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5853 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
/* Select stat i (1-based in HW) then read count; cycle counters are
 * a direct 64-bit register on T4 but an indirect MSB/LSB pair later.
 */
5854 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
5855 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
5856 if (is_t4(adap->params.chip)) {
5857 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
5859 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
5860 PM_TX_DBG_DATA_A, data, 2,
5861 PM_TX_DBG_STAT_MSB_A);
5862 cycles[i] = (((u64)data[0] << 32) | data[1]);
5868 * t4_pmrx_get_stats - returns the HW stats from PMRX
5869 * @adap: the adapter
5870 * @cnt: where to store the count statistics
5871 * @cycles: where to store the cycle statistics
5873 * Returns performance statistics from PMRX.
5875 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5880 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
/* Same scheme as t4_pmtx_get_stats() but via the PM_RX registers. */
5881 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
5882 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
5883 if (is_t4(adap->params.chip)) {
5884 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
5886 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
5887 PM_RX_DBG_DATA_A, data, 2,
5888 PM_RX_DBG_STAT_MSB_A);
5889 cycles[i] = (((u64)data[0] << 32) | data[1]);
5895 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
5896 * @adap: the adapter
5897 * @pidx: the port index
5899 * Computes and returns a bitmap indicating which MPS buffer groups are
5900 * associated with the given Port. Bit i is set if buffer group i is
5903 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
5906 unsigned int chip_version, nports;
5908 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
5909 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
/* Hardcoded mapping per chip generation and port count.  The outer
 * switch's chip-version case labels and the nports==1 cases are not
 * visible in this extract; only the inner nports dispatch shows.
 */
5911 switch (chip_version) {
5916 case 2: return 3 << (2 * pidx);
5917 case 4: return 1 << pidx;
5923 case 2: return 1 << (2 * pidx);
/* Unknown chip/port combination: report and (presumably) return a
 * fallback value not visible here.
 */
5928 dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
5929 chip_version, nports);
5935 * t4_get_mps_bg_map - return the buffer groups associated with a port
5936 * @adapter: the adapter
5937 * @pidx: the port index
5939 * Returns a bitmap indicating which MPS buffer groups are associated
5940 * with the given Port. Bit i is set if buffer group i is used by the
5943 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
5946 unsigned int nports;
5948 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
5949 if (pidx >= nports) {
5950 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
5955 /* If we've already retrieved/computed this, just return the result.
5957 mps_bg_map = adapter->params.mps_bg_map;
/* A zero entry doubles as "not yet cached" — valid maps are non-zero. */
5958 if (mps_bg_map[pidx])
5959 return mps_bg_map[pidx];
5961 /* Newer Firmware can tell us what the MPS Buffer Group Map is.
5962 * If we're talking to such Firmware, let it tell us. If the new
5963 * API isn't supported, revert back to old hardcoded way. The value
5964 * obtained from Firmware is encoded in below format:
5966 * val = (( MPSBGMAP[Port 3] << 24 ) |
5967 * ( MPSBGMAP[Port 2] << 16 ) |
5968 * ( MPSBGMAP[Port 1] << 8 ) |
5969 * ( MPSBGMAP[Port 0] << 0 ))
5971 if (adapter->flags & FW_OK) {
5975 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5976 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
/* NOTE(review): "¶m" below is a mojibake of "&param" (the HTML
 * entity "&para;" swallowed "&para" of "&param") — restore "&param"
 * in the real source.
 */
5977 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
5978 0, 1, ¶m, &val);
5982 /* Store the BG Map for all of the Ports in order to
5983 * avoid more calls to the Firmware in the future.
5985 for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
5986 mps_bg_map[p] = val & 0xff;
5988 return mps_bg_map[pidx];
5992 /* Either we're not talking to the Firmware or we're dealing with
5993 * older Firmware which doesn't support the new API to get the MPS
5994 * Buffer Group Map. Fall back to computing it ourselves.
5996 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
5997 return mps_bg_map[pidx];
6001 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6002 * @adapter: the adapter
6003 * @pidx: the port index
6005 * Returns a bitmap indicating which TP Ingress Channels are associated
6006 * with a given Port. Bit i is set if TP Ingress Channel i is used by
6009 unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6011 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6012 unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6014 if (pidx >= nports) {
6015 dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
/* Chip-version case labels and the nports==1 cases of the dispatch
 * are not visible in this extract.
 */
6020 switch (chip_version) {
6023 /* Note that this happens to be the same values as the MPS
6024 * Buffer Group Map for these Chips. But we replicate the code
6025 * here because they're really separate concepts.
6029 case 2: return 3 << (2 * pidx);
6030 case 4: return 1 << pidx;
6036 case 2: return 1 << pidx;
6041 dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6042 chip_version, nports);
6047 * t4_get_port_type_description - return Port Type string description
6048 * @port_type: firmware Port Type enumeration
6050 const char *t4_get_port_type_description(enum fw_port_type port_type)
/* Table contents (the string literals) are not visible in this extract. */
6052 static const char *const port_type_description[] = {
/* Bounds-checked lookup; the out-of-range fallback return is not shown. */
6077 if (port_type < ARRAY_SIZE(port_type_description))
6078 return port_type_description[port_type];
6083 * t4_get_port_stats_offset - collect port stats relative to a previous
6085 * @adap: The adapter
6087 * @stats: Current stats to fill
6088 * @offset: Previous stats snapshot
6090 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6091 struct port_stats *stats,
6092 struct port_stats *offset)
6097 t4_get_port_stats(adap, idx, stats);
/* Treat both structs as flat u64 arrays and subtract the snapshot
 * member-by-member; assumes port_stats is composed solely of u64s.
 */
6098 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6099 i < (sizeof(struct port_stats) / sizeof(u64));
6105 * t4_get_port_stats - collect port statistics
6106 * @adap: the adapter
6107 * @idx: the port index
6108 * @p: the stats structure to fill
6110 * Collect statistics related to the given port from HW.
6112 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6114 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6115 u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
6117 #define GET_STAT(name) \
6118 t4_read_reg64(adap, \
6119 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
6120 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
6121 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6123 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6124 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6125 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6126 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6127 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6128 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6129 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6130 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6131 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6132 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6133 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6134 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6135 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6136 p->tx_drop = GET_STAT(TX_PORT_DROP);
6137 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6138 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6139 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6140 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6141 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6142 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6143 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6144 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6145 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
/* On T5+, pause frames may be double-counted into the 64B / mcast
 * buckets depending on the STAT_CTL configuration; back them out.
 */
6147 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6148 if (stat_ctl & COUNTPAUSESTATTX_F)
6149 p->tx_frames_64 -= p->tx_pause;
6150 if (stat_ctl & COUNTPAUSEMCTX_F)
6151 p->tx_mcast_frames -= p->tx_pause;
6153 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6154 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6155 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6156 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6157 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6158 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6159 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6160 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6161 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6162 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6163 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6164 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6165 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6166 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6167 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6168 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6169 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6170 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6171 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6172 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6173 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6174 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6175 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6176 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6177 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6178 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6179 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Same pause-frame correction on the receive side. */
6181 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6182 if (stat_ctl & COUNTPAUSESTATRX_F)
6183 p->rx_frames_64 -= p->rx_pause;
6184 if (stat_ctl & COUNTPAUSEMCRX_F)
6185 p->rx_mcast_frames -= p->rx_pause;
/* Per-buffer-group drop/truncate counters only apply to the groups
 * mapped to this port (bgmap from t4_get_mps_bg_map()).
 */
6188 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6189 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6190 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6191 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6192 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6193 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6194 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6195 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6202 * t4_get_lb_stats - collect loopback port statistics
6203 * @adap: the adapter
6204 * @idx: the loopback port index
6205 * @p: the stats structure to fill
6207 * Return HW statistics for the given loopback port.
6209 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6211 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6213 #define GET_STAT(name) \
6214 t4_read_reg64(adap, \
6215 (is_t4(adap->params.chip) ? \
6216 PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
6217 T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
6218 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6220 p->octets = GET_STAT(BYTES);
6221 p->frames = GET_STAT(FRAMES);
6222 p->bcast_frames = GET_STAT(BCAST);
6223 p->mcast_frames = GET_STAT(MCAST);
6224 p->ucast_frames = GET_STAT(UCAST);
6225 p->error_frames = GET_STAT(ERROR);
6227 p->frames_64 = GET_STAT(64B);
6228 p->frames_65_127 = GET_STAT(65B_127B);
6229 p->frames_128_255 = GET_STAT(128B_255B);
6230 p->frames_256_511 = GET_STAT(256B_511B);
6231 p->frames_512_1023 = GET_STAT(512B_1023B);
6232 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6233 p->frames_1519_max = GET_STAT(1519B_MAX);
6234 p->drop = GET_STAT(DROP_FRAMES);
6236 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6237 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6238 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6239 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6240 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6241 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6242 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6243 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6249 /* t4_mk_filtdelwr - create a delete filter WR
6250 * @ftid: the filter ID
6251 * @wr: the filter work request to populate
6252 * @qid: ingress queue to receive the delete notification
6254 * Creates a filter work request to delete the supplied filter. If @qid is
6255 * negative the delete notification is suppressed.
6257 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6259 memset(wr, 0, sizeof(*wr));
6260 wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6261 wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6262 wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6263 FW_FILTER_WR_NOREPLY_V(qid < 0));
6264 wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
6266 wr->rx_chan_rx_rpl_iq =
6267 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
/* Initialize the common header of a firmware command: opcode, REQUEST flag,
 * READ/WRITE direction and the length-in-16-byte-units field.  Wrapped in
 * do { } while (0) so it behaves as a single statement at call sites.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6277 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6281 struct fw_ldst_cmd c;
6283 memset(&c, 0, sizeof(c));
6284 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
6285 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6289 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6290 c.u.addrval.addr = cpu_to_be32(addr);
6291 c.u.addrval.val = cpu_to_be32(val);
6293 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6297 * t4_mdio_rd - read a PHY register through MDIO
6298 * @adap: the adapter
6299 * @mbox: mailbox to use for the FW command
6300 * @phy_addr: the PHY address
6301 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6302 * @reg: the register to read
6303 * @valp: where to store the value
6305 * Issues a FW command through the given mailbox to read a PHY register.
6307 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6308 unsigned int mmd, unsigned int reg, u16 *valp)
6312 struct fw_ldst_cmd c;
6314 memset(&c, 0, sizeof(c));
6315 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6316 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6317 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6319 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6320 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6321 FW_LDST_CMD_MMD_V(mmd));
6322 c.u.mdio.raddr = cpu_to_be16(reg);
6324 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6326 *valp = be16_to_cpu(c.u.mdio.rval);
6331 * t4_mdio_wr - write a PHY register through MDIO
6332 * @adap: the adapter
6333 * @mbox: mailbox to use for the FW command
6334 * @phy_addr: the PHY address
6335 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6336 * @reg: the register to write
6337 * @valp: value to write
6339 * Issues a FW command through the given mailbox to write a PHY register.
6341 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6342 unsigned int mmd, unsigned int reg, u16 val)
6345 struct fw_ldst_cmd c;
6347 memset(&c, 0, sizeof(c));
6348 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6349 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6350 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6352 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6353 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6354 FW_LDST_CMD_MMD_V(mmd));
6355 c.u.mdio.raddr = cpu_to_be16(reg);
6356 c.u.mdio.rval = cpu_to_be16(val);
6358 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6362 * t4_sge_decode_idma_state - decode the idma state
6363 * @adap: the adapter
6364 * @state: the state idma is stuck in
6366 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6368 static const char * const t4_decode[] = {
6370 "IDMA_PUSH_MORE_CPL_FIFO",
6371 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6373 "IDMA_PHYSADDR_SEND_PCIEHDR",
6374 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6375 "IDMA_PHYSADDR_SEND_PAYLOAD",
6376 "IDMA_SEND_FIFO_TO_IMSG",
6377 "IDMA_FL_REQ_DATA_FL_PREP",
6378 "IDMA_FL_REQ_DATA_FL",
6380 "IDMA_FL_H_REQ_HEADER_FL",
6381 "IDMA_FL_H_SEND_PCIEHDR",
6382 "IDMA_FL_H_PUSH_CPL_FIFO",
6383 "IDMA_FL_H_SEND_CPL",
6384 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6385 "IDMA_FL_H_SEND_IP_HDR",
6386 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6387 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6388 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6389 "IDMA_FL_D_SEND_PCIEHDR",
6390 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6391 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6392 "IDMA_FL_SEND_PCIEHDR",
6393 "IDMA_FL_PUSH_CPL_FIFO",
6395 "IDMA_FL_SEND_PAYLOAD_FIRST",
6396 "IDMA_FL_SEND_PAYLOAD",
6397 "IDMA_FL_REQ_NEXT_DATA_FL",
6398 "IDMA_FL_SEND_NEXT_PCIEHDR",
6399 "IDMA_FL_SEND_PADDING",
6400 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6401 "IDMA_FL_SEND_FIFO_TO_IMSG",
6402 "IDMA_FL_REQ_DATAFL_DONE",
6403 "IDMA_FL_REQ_HEADERFL_DONE",
6405 static const char * const t5_decode[] = {
6408 "IDMA_PUSH_MORE_CPL_FIFO",
6409 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6410 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6411 "IDMA_PHYSADDR_SEND_PCIEHDR",
6412 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6413 "IDMA_PHYSADDR_SEND_PAYLOAD",
6414 "IDMA_SEND_FIFO_TO_IMSG",
6415 "IDMA_FL_REQ_DATA_FL",
6417 "IDMA_FL_DROP_SEND_INC",
6418 "IDMA_FL_H_REQ_HEADER_FL",
6419 "IDMA_FL_H_SEND_PCIEHDR",
6420 "IDMA_FL_H_PUSH_CPL_FIFO",
6421 "IDMA_FL_H_SEND_CPL",
6422 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6423 "IDMA_FL_H_SEND_IP_HDR",
6424 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6425 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6426 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6427 "IDMA_FL_D_SEND_PCIEHDR",
6428 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6429 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6430 "IDMA_FL_SEND_PCIEHDR",
6431 "IDMA_FL_PUSH_CPL_FIFO",
6433 "IDMA_FL_SEND_PAYLOAD_FIRST",
6434 "IDMA_FL_SEND_PAYLOAD",
6435 "IDMA_FL_REQ_NEXT_DATA_FL",
6436 "IDMA_FL_SEND_NEXT_PCIEHDR",
6437 "IDMA_FL_SEND_PADDING",
6438 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6440 static const char * const t6_decode[] = {
6442 "IDMA_PUSH_MORE_CPL_FIFO",
6443 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6444 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6445 "IDMA_PHYSADDR_SEND_PCIEHDR",
6446 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6447 "IDMA_PHYSADDR_SEND_PAYLOAD",
6448 "IDMA_FL_REQ_DATA_FL",
6450 "IDMA_FL_DROP_SEND_INC",
6451 "IDMA_FL_H_REQ_HEADER_FL",
6452 "IDMA_FL_H_SEND_PCIEHDR",
6453 "IDMA_FL_H_PUSH_CPL_FIFO",
6454 "IDMA_FL_H_SEND_CPL",
6455 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6456 "IDMA_FL_H_SEND_IP_HDR",
6457 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6458 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6459 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6460 "IDMA_FL_D_SEND_PCIEHDR",
6461 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6462 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6463 "IDMA_FL_SEND_PCIEHDR",
6464 "IDMA_FL_PUSH_CPL_FIFO",
6466 "IDMA_FL_SEND_PAYLOAD_FIRST",
6467 "IDMA_FL_SEND_PAYLOAD",
6468 "IDMA_FL_REQ_NEXT_DATA_FL",
6469 "IDMA_FL_SEND_NEXT_PCIEHDR",
6470 "IDMA_FL_SEND_PADDING",
6471 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6473 static const u32 sge_regs[] = {
6474 SGE_DEBUG_DATA_LOW_INDEX_2_A,
6475 SGE_DEBUG_DATA_LOW_INDEX_3_A,
6476 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
6478 const char **sge_idma_decode;
6479 int sge_idma_decode_nstates;
6481 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6483 /* Select the right set of decode strings to dump depending on the
6484 * adapter chip type.
6486 switch (chip_version) {
6488 sge_idma_decode = (const char **)t4_decode;
6489 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6493 sge_idma_decode = (const char **)t5_decode;
6494 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6498 sge_idma_decode = (const char **)t6_decode;
6499 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6503 dev_err(adapter->pdev_dev,
6504 "Unsupported chip version %d\n", chip_version);
6508 if (is_t4(adapter->params.chip)) {
6509 sge_idma_decode = (const char **)t4_decode;
6510 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6512 sge_idma_decode = (const char **)t5_decode;
6513 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6516 if (state < sge_idma_decode_nstates)
6517 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6519 CH_WARN(adapter, "idma state %d unknown\n", state);
6521 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6522 CH_WARN(adapter, "SGE register %#x value %#x\n",
6523 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6527 * t4_sge_ctxt_flush - flush the SGE context cache
6528 * @adap: the adapter
6529 * @mbox: mailbox to use for the FW command
6531 * Issues a FW command through the given mailbox to flush the
6532 * SGE context cache.
6534 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6538 struct fw_ldst_cmd c;
6540 memset(&c, 0, sizeof(c));
6541 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
6542 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6543 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6545 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6546 c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6548 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6553 * t4_fw_hello - establish communication with FW
6554 * @adap: the adapter
6555 * @mbox: mailbox to use for the FW command
6556 * @evt_mbox: mailbox to receive async FW events
6557 * @master: specifies the caller's willingness to be the device master
6558 * @state: returns the current device state (if non-NULL)
6560 * Issues a command to establish communication with FW. Returns either
6561 * an error (negative integer) or the mailbox of the Master PF.
6563 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6564 enum dev_master master, enum dev_state *state)
6567 struct fw_hello_cmd c;
6569 unsigned int master_mbox;
6570 int retries = FW_CMD_HELLO_RETRIES;
6573 memset(&c, 0, sizeof(c));
6574 INIT_CMD(c, HELLO, WRITE);
6575 c.err_to_clearinit = cpu_to_be32(
6576 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
6577 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
6578 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
6579 mbox : FW_HELLO_CMD_MBMASTER_M) |
6580 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
6581 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
6582 FW_HELLO_CMD_CLEARINIT_F);
6585 * Issue the HELLO command to the firmware. If it's not successful
6586 * but indicates that we got a "busy" or "timeout" condition, retry
6587 * the HELLO until we exhaust our retry limit. If we do exceed our
6588 * retry limit, check to see if the firmware left us any error
6589 * information and report that if so.
6591 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6593 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6595 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
6596 t4_report_fw_error(adap);
6600 v = be32_to_cpu(c.err_to_clearinit);
6601 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
6603 if (v & FW_HELLO_CMD_ERR_F)
6604 *state = DEV_STATE_ERR;
6605 else if (v & FW_HELLO_CMD_INIT_F)
6606 *state = DEV_STATE_INIT;
6608 *state = DEV_STATE_UNINIT;
6612 * If we're not the Master PF then we need to wait around for the
6613 * Master PF Driver to finish setting up the adapter.
6615 * Note that we also do this wait if we're a non-Master-capable PF and
6616 * there is no current Master PF; a Master PF may show up momentarily
6617 * and we wouldn't want to fail pointlessly. (This can happen when an
6618 * OS loads lots of different drivers rapidly at the same time). In
6619 * this case, the Master PF returned by the firmware will be
6620 * PCIE_FW_MASTER_M so the test below will work ...
6622 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
6623 master_mbox != mbox) {
6624 int waiting = FW_CMD_HELLO_TIMEOUT;
6627 * Wait for the firmware to either indicate an error or
6628 * initialized state. If we see either of these we bail out
6629 * and report the issue to the caller. If we exhaust the
6630 * "hello timeout" and we haven't exhausted our retries, try
6631 * again. Otherwise bail with a timeout error.
6640 * If neither Error nor Initialialized are indicated
6641 * by the firmware keep waiting till we exaust our
6642 * timeout ... and then retry if we haven't exhausted
6645 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
6646 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
6657 * We either have an Error or Initialized condition
6658 * report errors preferentially.
6661 if (pcie_fw & PCIE_FW_ERR_F)
6662 *state = DEV_STATE_ERR;
6663 else if (pcie_fw & PCIE_FW_INIT_F)
6664 *state = DEV_STATE_INIT;
6668 * If we arrived before a Master PF was selected and
6669 * there's not a valid Master PF, grab its identity
6672 if (master_mbox == PCIE_FW_MASTER_M &&
6673 (pcie_fw & PCIE_FW_MASTER_VLD_F))
6674 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
6683 * t4_fw_bye - end communication with FW
6684 * @adap: the adapter
6685 * @mbox: mailbox to use for the FW command
6687 * Issues a command to terminate communication with FW.
6689 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6691 struct fw_bye_cmd c;
6693 memset(&c, 0, sizeof(c));
6694 INIT_CMD(c, BYE, WRITE);
6695 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6699 * t4_init_cmd - ask FW to initialize the device
6700 * @adap: the adapter
6701 * @mbox: mailbox to use for the FW command
6703 * Issues a command to FW to partially initialize the device. This
6704 * performs initialization that generally doesn't depend on user input.
6706 int t4_early_init(struct adapter *adap, unsigned int mbox)
6708 struct fw_initialize_cmd c;
6710 memset(&c, 0, sizeof(c));
6711 INIT_CMD(c, INITIALIZE, WRITE);
6712 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6716 * t4_fw_reset - issue a reset to FW
6717 * @adap: the adapter
6718 * @mbox: mailbox to use for the FW command
6719 * @reset: specifies the type of reset to perform
6721 * Issues a reset command of the specified type to FW.
6723 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6725 struct fw_reset_cmd c;
6727 memset(&c, 0, sizeof(c));
6728 INIT_CMD(c, RESET, WRITE);
6729 c.val = cpu_to_be32(reset);
6730 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6734 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6735 * @adap: the adapter
6736 * @mbox: mailbox to use for the FW RESET command (if desired)
6737 * @force: force uP into RESET even if FW RESET command fails
6739 * Issues a RESET command to firmware (if desired) with a HALT indication
6740 * and then puts the microprocessor into RESET state. The RESET command
6741 * will only be issued if a legitimate mailbox is provided (mbox <=
6742 * PCIE_FW_MASTER_M).
6744 * This is generally used in order for the host to safely manipulate the
6745 * adapter without fear of conflicting with whatever the firmware might
6746 * be doing. The only way out of this state is to RESTART the firmware
6749 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6754 * If a legitimate mailbox is provided, issue a RESET command
6755 * with a HALT indication.
6757 if (mbox <= PCIE_FW_MASTER_M) {
6758 struct fw_reset_cmd c;
6760 memset(&c, 0, sizeof(c));
6761 INIT_CMD(c, RESET, WRITE);
6762 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
6763 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
6764 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6768 * Normally we won't complete the operation if the firmware RESET
6769 * command fails but if our caller insists we'll go ahead and put the
6770 * uP into RESET. This can be useful if the firmware is hung or even
6771 * missing ... We'll have to take the risk of putting the uP into
6772 * RESET without the cooperation of firmware in that case.
6774 * We also force the firmware's HALT flag to be on in case we bypassed
6775 * the firmware RESET command above or we're dealing with old firmware
6776 * which doesn't have the HALT capability. This will serve as a flag
6777 * for the incoming firmware to know that it's coming out of a HALT
6778 * rather than a RESET ... if it's new enough to understand that ...
6780 if (ret == 0 || force) {
6781 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
6782 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
6787 * And we always return the result of the firmware RESET command
6788 * even when we force the uP into RESET ...
6794 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6795 * @adap: the adapter
6796 * @reset: if we want to do a RESET to restart things
6798 * Restart firmware previously halted by t4_fw_halt(). On successful
6799 * return the previous PF Master remains as the new PF Master and there
6800 * is no need to issue a new HELLO command, etc.
6802 * We do this in two ways:
6804 * 1. If we're dealing with newer firmware we'll simply want to take
6805 * the chip's microprocessor out of RESET. This will cause the
6806 * firmware to start up from its start vector. And then we'll loop
6807 * until the firmware indicates it's started again (PCIE_FW.HALT
6808 * reset to 0) or we timeout.
6810 * 2. If we're dealing with older firmware then we'll need to RESET
6811 * the chip since older firmware won't recognize the PCIE_FW.HALT
6812 * flag and automatically RESET itself on startup.
6814 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6818 * Since we're directing the RESET instead of the firmware
6819 * doing it automatically, we need to clear the PCIE_FW.HALT
6822 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
6825 * If we've been given a valid mailbox, first try to get the
6826 * firmware to do the RESET. If that works, great and we can
6827 * return success. Otherwise, if we haven't been given a
6828 * valid mailbox or the RESET command failed, fall back to
6829 * hitting the chip with a hammer.
6831 if (mbox <= PCIE_FW_MASTER_M) {
6832 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6834 if (t4_fw_reset(adap, mbox,
6835 PIORST_F | PIORSTMODE_F) == 0)
6839 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
6844 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6845 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6846 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
6857 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6858 * @adap: the adapter
6859 * @mbox: mailbox to use for the FW RESET command (if desired)
6860 * @fw_data: the firmware image to write
6862 * @force: force upgrade even if firmware doesn't cooperate
6864 * Perform all of the steps necessary for upgrading an adapter's
6865 * firmware image. Normally this requires the cooperation of the
6866 * existing firmware in order to halt all existing activities
6867 * but if an invalid mailbox token is passed in we skip that step
6868 * (though we'll still put the adapter microprocessor into RESET in
6871 * On successful return the new firmware will have been loaded and
6872 * the adapter will have been fully RESET losing all previous setup
6873 * state. On unsuccessful return the adapter may be completely hosed ...
6874 * positive errno indicates that the adapter is ~probably~ intact, a
6875 * negative errno indicates that things are looking bad ...
6877 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6878 const u8 *fw_data, unsigned int size, int force)
6880 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6883 if (!t4_fw_matches_chip(adap, fw_hdr))
6886 /* Disable FW_OK flag so that mbox commands with FW_OK flag set
6887 * wont be sent when we are flashing FW.
6889 adap->flags &= ~FW_OK;
6891 ret = t4_fw_halt(adap, mbox, force);
6892 if (ret < 0 && !force)
6895 ret = t4_load_fw(adap, fw_data, size);
6900 * If there was a Firmware Configuration File stored in FLASH,
6901 * there's a good chance that it won't be compatible with the new
6902 * Firmware. In order to prevent difficult to diagnose adapter
6903 * initialization issues, we clear out the Firmware Configuration File
6904 * portion of the FLASH . The user will need to re-FLASH a new
6905 * Firmware Configuration File which is compatible with the new
6906 * Firmware if that's desired.
6908 (void)t4_load_cfg(adap, NULL, 0);
6911 * Older versions of the firmware don't understand the new
6912 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6913 * restart. So for newly loaded older firmware we'll have to do the
6914 * RESET for it so it starts up on a clean slate. We can tell if
6915 * the newly loaded firmware will handle this right by checking
6916 * its header flags to see if it advertises the capability.
6918 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6919 ret = t4_fw_restart(adap, mbox, reset);
6921 /* Grab potentially new Firmware Device Log parameters so we can see
6922 * how healthy the new Firmware is. It's okay to contact the new
6923 * Firmware for these parameters even though, as far as it's
6924 * concerned, we've never said "HELLO" to it ...
6926 (void)t4_init_devlog_params(adap);
6928 adap->flags |= FW_OK;
6933 * t4_fl_pkt_align - return the fl packet alignment
6934 * @adap: the adapter
6936 * T4 has a single field to specify the packing and padding boundary.
6937 * T5 onwards has separate fields for this and hence the alignment for
6938 * next packet offset is maximum of these two.
6941 int t4_fl_pkt_align(struct adapter *adap)
6943 u32 sge_control, sge_control2;
6944 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
6946 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
6948 /* T4 uses a single control field to specify both the PCIe Padding and
6949 * Packing Boundary. T5 introduced the ability to specify these
6950 * separately. The actual Ingress Packet Data alignment boundary
6951 * within Packed Buffer Mode is the maximum of these two
6952 * specifications. (Note that it makes no real practical sense to
6953 * have the Pading Boudary be larger than the Packing Boundary but you
6954 * could set the chip up that way and, in fact, legacy T4 code would
6955 * end doing this because it would initialize the Padding Boundary and
6956 * leave the Packing Boundary initialized to 0 (16 bytes).)
6957 * Padding Boundary values in T6 starts from 8B,
6958 * where as it is 32B for T4 and T5.
6960 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
6961 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
6963 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
6965 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
6967 fl_align = ingpadboundary;
6968 if (!is_t4(adap->params.chip)) {
6969 /* T5 has a weird interpretation of one of the PCIe Packing
6970 * Boundary values. No idea why ...
6972 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
6973 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
6974 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
6975 ingpackboundary = 16;
6977 ingpackboundary = 1 << (ingpackboundary +
6978 INGPACKBOUNDARY_SHIFT_X);
6980 fl_align = max(ingpadboundary, ingpackboundary);
6986 * t4_fixup_host_params - fix up host-dependent parameters
6987 * @adap: the adapter
6988 * @page_size: the host's Base Page Size
6989 * @cache_line_size: the host's Cache Line Size
6991 * Various registers in T4 contain values which are dependent on the
6992 * host's Base Page and Cache Line Sizes. This function will fix all of
6993 * those registers with the appropriate values as passed in ...
6995 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
6996 unsigned int cache_line_size)
6998 unsigned int page_shift = fls(page_size) - 1;
6999 unsigned int sge_hps = page_shift - 10;
7000 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7001 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7002 unsigned int fl_align_log = fls(fl_align) - 1;
7004 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
7005 HOSTPAGESIZEPF0_V(sge_hps) |
7006 HOSTPAGESIZEPF1_V(sge_hps) |
7007 HOSTPAGESIZEPF2_V(sge_hps) |
7008 HOSTPAGESIZEPF3_V(sge_hps) |
7009 HOSTPAGESIZEPF4_V(sge_hps) |
7010 HOSTPAGESIZEPF5_V(sge_hps) |
7011 HOSTPAGESIZEPF6_V(sge_hps) |
7012 HOSTPAGESIZEPF7_V(sge_hps));
7014 if (is_t4(adap->params.chip)) {
7015 t4_set_reg_field(adap, SGE_CONTROL_A,
7016 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7017 EGRSTATUSPAGESIZE_F,
7018 INGPADBOUNDARY_V(fl_align_log -
7019 INGPADBOUNDARY_SHIFT_X) |
7020 EGRSTATUSPAGESIZE_V(stat_len != 64));
7022 unsigned int pack_align;
7023 unsigned int ingpad, ingpack;
7024 unsigned int pcie_cap;
7026 /* T5 introduced the separation of the Free List Padding and
7027 * Packing Boundaries. Thus, we can select a smaller Padding
7028 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7029 * Bandwidth, and use a Packing Boundary which is large enough
7030 * to avoid false sharing between CPUs, etc.
7032 * For the PCI Link, the smaller the Padding Boundary the
7033 * better. For the Memory Controller, a smaller Padding
7034 * Boundary is better until we cross under the Memory Line
7035 * Size (the minimum unit of transfer to/from Memory). If we
7036 * have a Padding Boundary which is smaller than the Memory
7037 * Line Size, that'll involve a Read-Modify-Write cycle on the
7038 * Memory Controller which is never good.
7041 /* We want the Packing Boundary to be based on the Cache Line
7042 * Size in order to help avoid False Sharing performance
7043 * issues between CPUs, etc. We also want the Packing
7044 * Boundary to incorporate the PCI-E Maximum Payload Size. We
7045 * get best performance when the Packing Boundary is a
7046 * multiple of the Maximum Payload Size.
7048 pack_align = fl_align;
7049 pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
7051 unsigned int mps, mps_log;
7054 /* The PCIe Device Control Maximum Payload Size field
7055 * [bits 7:5] encodes sizes as powers of 2 starting at
7058 pci_read_config_word(adap->pdev,
7059 pcie_cap + PCI_EXP_DEVCTL,
7061 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7063 if (mps > pack_align)
7067 /* N.B. T5/T6 have a crazy special interpretation of the "0"
7068 * value for the Packing Boundary. This corresponds to 16
7069 * bytes instead of the expected 32 bytes. So if we want 32
7070 * bytes, the best we can really do is 64 bytes ...
7072 if (pack_align <= 16) {
7073 ingpack = INGPACKBOUNDARY_16B_X;
7075 } else if (pack_align == 32) {
7076 ingpack = INGPACKBOUNDARY_64B_X;
7079 unsigned int pack_align_log = fls(pack_align) - 1;
7081 ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
7082 fl_align = pack_align;
7085 /* Use the smallest Ingress Padding which isn't smaller than
7086 * the Memory Controller Read/Write Size. We'll take that as
7087 * being 8 bytes since we don't know of any system with a
7088 * wider Memory Controller Bus Width.
7090 if (is_t5(adap->params.chip))
7091 ingpad = INGPADBOUNDARY_32B_X;
7093 ingpad = T6_INGPADBOUNDARY_8B_X;
7095 t4_set_reg_field(adap, SGE_CONTROL_A,
7096 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7097 EGRSTATUSPAGESIZE_F,
7098 INGPADBOUNDARY_V(ingpad) |
7099 EGRSTATUSPAGESIZE_V(stat_len != 64));
7100 t4_set_reg_field(adap, SGE_CONTROL2_A,
7101 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
7102 INGPACKBOUNDARY_V(ingpack));
7105 * Adjust various SGE Free List Host Buffer Sizes.
7107 * This is something of a crock since we're using fixed indices into
7108 * the array which are also known by the sge.c code and the T4
7109 * Firmware Configuration File. We need to come up with a much better
7110 * approach to managing this array. For now, the first four entries
7115 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7116 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7118 * For the single-MTU buffers in unpacked mode we need to include
7119 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7120 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7121 * Padding boundary. All of these are accommodated in the Factory
7122 * Default Firmware Configuration File but we need to adjust it for
7123 * this host's cache line size.
7125 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
7126 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
7127 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
7129 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
7130 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
7133 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
7139 * t4_fw_initialize - ask FW to initialize the device
7140 * @adap: the adapter
7141 * @mbox: mailbox to use for the FW command
7143 * Issues a command to FW to partially initialize the device. This
7144 * performs initialization that generally doesn't depend on user input.
7146 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7148 struct fw_initialize_cmd c;
7150 memset(&c, 0, sizeof(c));
7151 INIT_CMD(c, INITIALIZE, WRITE);
7152 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7156 * t4_query_params_rw - query FW or device parameters
7157 * @adap: the adapter
7158 * @mbox: mailbox to use for the FW command
7161 * @nparams: the number of parameters
7162 * @params: the parameter names
7163 * @val: the parameter values
7164 * @rw: Write and read flag
7165 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
7167 * Reads the value of FW or device parameters. Up to 7 parameters can be
7170 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7171 unsigned int vf, unsigned int nparams, const u32 *params,
7172 u32 *val, int rw, bool sleep_ok)
7175 struct fw_params_cmd c;
/* p walks the command's param[] array; each entry is a { mnem, val }
 * pair of __be32 words.
 */
7176 __be32 *p = &c.param[0].mnem;
7181 memset(&c, 0, sizeof(c));
7182 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7183 FW_CMD_REQUEST_F | FW_CMD_READ_F |
7184 FW_PARAMS_CMD_PFN_V(pf) |
7185 FW_PARAMS_CMD_VFN_V(vf));
7186 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Load each parameter mnemonic; the current value is also supplied
 * here — presumably only when @rw is set (guard not visible in this
 * view; TODO confirm).
 */
7188 for (i = 0; i < nparams; i++) {
7189 *p++ = cpu_to_be32(*params++);
7191 *p = cpu_to_be32(*(val + i));
7195 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
/* Extract the returned values from the firmware's reply copy of the
 * command; stride 2 skips over each entry's mnemonic word.
 */
7197 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7198 *val++ = be32_to_cpu(*p);
/* t4_query_params - read FW/device parameters, sleeping variant.
 * Thin wrapper around t4_query_params_rw() with rw = 0 (pure read);
 * all other arguments are forwarded unchanged.
 */
7202 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7203 unsigned int vf, unsigned int nparams, const u32 *params,
7206 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
/* t4_query_params_ns - read FW/device parameters, non-sleeping variant.
 * Same as t4_query_params() (rw = 0) but intended for contexts that
 * cannot sleep — presumably passes sleep_ok = false; TODO confirm
 * (final argument not visible here).
 */
7210 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7211 unsigned int vf, unsigned int nparams, const u32 *params,
7214 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7219 * t4_set_params_timeout - sets FW or device parameters
7220 * @adap: the adapter
7221 * @mbox: mailbox to use for the FW command
7224 * @nparams: the number of parameters
7225 * @params: the parameter names
7226 * @val: the parameter values
7227 * @timeout: the timeout time
7229 * Sets the value of FW or device parameters. Up to 7 parameters can be
7230 * specified at once.
7232 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7233 unsigned int pf, unsigned int vf,
7234 unsigned int nparams, const u32 *params,
7235 const u32 *val, int timeout)
7237 struct fw_params_cmd c;
/* p walks the { mnem, val } pairs of the command's param[] array. */
7238 __be32 *p = &c.param[0].mnem;
7243 memset(&c, 0, sizeof(c));
7244 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7245 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7246 FW_PARAMS_CMD_PFN_V(pf) |
7247 FW_PARAMS_CMD_VFN_V(vf));
7248 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Fill in mnemonic/value pairs, then issue with the caller's timeout. */
7251 *p++ = cpu_to_be32(*params++);
7252 *p++ = cpu_to_be32(*val++);
7255 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7259 * t4_set_params - sets FW or device parameters
7260 * @adap: the adapter
7261 * @mbox: mailbox to use for the FW command
7264 * @nparams: the number of parameters
7265 * @params: the parameter names
7266 * @val: the parameter values
7268 * Sets the value of FW or device parameters. Up to 7 parameters can be
7269 * specified at once.
/* Convenience wrapper: same as t4_set_params_timeout() with the
 * default FW_CMD_MAX_TIMEOUT mailbox timeout.
 */
7271 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7272 unsigned int vf, unsigned int nparams, const u32 *params,
7275 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7276 FW_CMD_MAX_TIMEOUT);
7280 * t4_cfg_pfvf - configure PF/VF resource limits
7281 * @adap: the adapter
7282 * @mbox: mailbox to use for the FW command
7283 * @pf: the PF being configured
7284 * @vf: the VF being configured
7285 * @txq: the max number of egress queues
7286 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7287 * @rxqi: the max number of interrupt-capable ingress queues
7288 * @rxq: the max number of interruptless ingress queues
7289 * @tc: the PCI traffic class
7290 * @vi: the max number of virtual interfaces
7291 * @cmask: the channel access rights mask for the PF/VF
7292 * @pmask: the port access rights mask for the PF/VF
7293 * @nexact: the maximum number of exact MPS filters
7294 * @rcaps: read capabilities
7295 * @wxcaps: write/execute capabilities
7297 * Configures resource limits and capabilities for a physical or virtual
7300 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7301 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7302 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7303 unsigned int vi, unsigned int cmask, unsigned int pmask,
7304 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7306 struct fw_pfvf_cmd c;
/* Pack all the limits into the FW_PFVF_CMD's bit-fields and issue a
 * single synchronous write; the mailbox status is returned as-is.
 */
7308 memset(&c, 0, sizeof(c));
7309 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7310 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7311 FW_PFVF_CMD_VFN_V(vf));
7312 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7313 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7314 FW_PFVF_CMD_NIQ_V(rxq));
7315 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7316 FW_PFVF_CMD_PMASK_V(pmask) |
7317 FW_PFVF_CMD_NEQ_V(txq));
7318 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7319 FW_PFVF_CMD_NVI_V(vi) |
7320 FW_PFVF_CMD_NEXACTF_V(nexact));
7321 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7322 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7323 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7324 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7328 * t4_alloc_vi - allocate a virtual interface
7329 * @adap: the adapter
7330 * @mbox: mailbox to use for the FW command
7331 * @port: physical port associated with the VI
7332 * @pf: the PF owning the VI
7333 * @vf: the VF owning the VI
7334 * @nmac: number of MAC addresses needed (1 to 5)
7335 * @mac: the MAC addresses of the VI
7336 * @rss_size: size of RSS table slice associated with this VI
7338 * Allocates a virtual interface for the given physical port. If @mac is
7339 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7340 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7341 * stored consecutively so the space needed is @nmac * 6 bytes.
7342 * Returns a negative error number or the non-negative VI id.
7344 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7345 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7346 unsigned int *rss_size)
7351 memset(&c, 0, sizeof(c));
7352 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7353 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7354 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7355 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
7356 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
7359 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Copy back as many of the firmware-assigned MAC addresses as were
 * requested. The reply carries them in separate fields (mac plus
 * nmac0..nmac3), each 6 bytes, stored consecutively into @mac — the
 * fall-through ordering below fills the highest-index slots first.
 */
7364 memcpy(mac, c.mac, sizeof(c.mac));
7367 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7369 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7371 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7373 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
/* Report the RSS slice size and return the newly-allocated VI id. */
7377 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7378 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
7382 * t4_free_vi - free a virtual interface
7383 * @adap: the adapter
7384 * @mbox: mailbox to use for the FW command
7385 * @pf: the PF owning the VI
7386 * @vf: the VF owning the VI
7387 * @viid: virtual interface identifier
7389 * Free a previously allocated virtual interface.
7391 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7392 unsigned int vf, unsigned int viid)
/* Issue a FW_VI_CMD with the FREE flag; firmware releases the VI. */
7396 memset(&c, 0, sizeof(c));
7397 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7400 FW_VI_CMD_PFN_V(pf) |
7401 FW_VI_CMD_VFN_V(vf));
7402 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7403 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7405 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7409 * t4_set_rxmode - set Rx properties of a virtual interface
7410 * @adap: the adapter
7411 * @mbox: mailbox to use for the FW command
7413 * @mtu: the new MTU or -1
7414 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7415 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7416 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7417 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7418 * @sleep_ok: if true we may sleep while awaiting command completion
7420 * Sets Rx properties of a virtual interface.
7422 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7423 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7426 struct fw_vi_rxmode_cmd c;
7428 /* convert to FW values */
/* A -1 ("no change") request is mapped to the field's all-ones mask
 * value, which tells the firmware to leave that setting untouched.
 */
7430 mtu = FW_RXMODE_MTU_NO_CHG;
7432 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7434 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7436 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7438 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7440 memset(&c, 0, sizeof(c));
7441 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7442 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7443 FW_VI_RXMODE_CMD_VIID_V(viid));
7444 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7446 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7447 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7448 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7449 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7450 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
7451 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7455 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7456 * @adap: the adapter
7457 * @mbox: mailbox to use for the FW command
7459 * @free: if true any existing filters for this VI id are first removed
7460 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7461 * @addr: the MAC address(es)
7462 * @idx: where to store the index of each allocated filter
7463 * @hash: pointer to hash address filter bitmap
7464 * @sleep_ok: call is allowed to sleep
7466 * Allocates an exact-match filter for each of the supplied addresses and
7467 * sets it to the corresponding address. If @idx is not %NULL it should
7468 * have at least @naddr entries, each of which will be set to the index of
7469 * the filter allocated for the corresponding MAC address. If a filter
7470 * could not be allocated for an address its index is set to 0xffff.
7471 * If @hash is not %NULL addresses that fail to allocate an exact filter
7472 * are hashed and update the hash filter bitmap pointed at by @hash.
7474 * Returns a negative error number or the number of filters allocated.
7476 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7477 unsigned int viid, bool free, unsigned int naddr,
7478 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7480 int offset, ret = 0;
7481 struct fw_vi_mac_cmd c;
7482 unsigned int nfilters = 0;
7483 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
7484 unsigned int rem = naddr;
/* Refuse requests larger than the MPS TCAM can ever satisfy. */
7486 if (naddr > max_naddr)
/* Submit the addresses in chunks of up to ARRAY_SIZE(c.u.exact) per
 * mailbox command; offset/rem track overall progress across chunks.
 */
7489 for (offset = 0; offset < naddr ; /**/) {
7490 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
7491 rem : ARRAY_SIZE(c.u.exact));
7492 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7493 u.exact[fw_naddr]), 16);
7494 struct fw_vi_mac_exact *p;
7497 memset(&c, 0, sizeof(c));
7498 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7501 FW_CMD_EXEC_V(free) |
7502 FW_VI_MAC_CMD_VIID_V(viid));
7503 c.freemacs_to_len16 =
7504 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
7505 FW_CMD_LEN16_V(len16));
7507 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7509 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7510 FW_VI_MAC_CMD_IDX_V(
7511 FW_VI_MAC_ADD_MAC));
7512 memcpy(p->macaddr, addr[offset + i],
7513 sizeof(p->macaddr));
7516 /* It's okay if we run out of space in our MAC address arena.
7517 * Some of the addresses we submit may get stored so we need
7518 * to run through the reply to see what the results were ...
7520 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7521 if (ret && ret != -FW_ENOMEM)
/* Walk the reply: an index >= max_naddr means that address did not
 * get an exact filter and (if @hash is set) falls back to the hash
 * filter bitmap; otherwise count it as an allocated filter.
 */
7524 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7525 u16 index = FW_VI_MAC_CMD_IDX_G(
7526 be16_to_cpu(p->valid_to_idx));
7529 idx[offset + i] = (index >= max_naddr ?
7531 if (index < max_naddr)
7535 hash_mac_addr(addr[offset + i]));
/* -FW_ENOMEM is not fatal here: report however many filters stuck. */
7543 if (ret == 0 || ret == -FW_ENOMEM)
7549 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
7550 * @adap: the adapter
7551 * @mbox: mailbox to use for the FW command
7553 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7554 * @addr: the MAC address(es)
7555 * @sleep_ok: call is allowed to sleep
7557 * Frees the exact-match filter for each of the supplied addresses
7559 * Returns a negative error number or the number of filters freed.
7561 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
7562 unsigned int viid, unsigned int naddr,
7563 const u8 **addr, bool sleep_ok)
7565 int offset, ret = 0;
7566 struct fw_vi_mac_cmd c;
7567 unsigned int nfilters = 0;
/* Unlike t4_alloc_mac_filt(), the TCAM size is derived from the chip
 * generation here rather than adap->params.arch — NOTE(review):
 * consider unifying the two for consistency.
 */
7568 unsigned int max_naddr = is_t4(adap->params.chip) ?
7569 NUM_MPS_CLS_SRAM_L_INSTANCES :
7570 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7571 unsigned int rem = naddr;
7573 if (naddr > max_naddr)
/* Free the addresses in chunks of up to ARRAY_SIZE(c.u.exact). */
7576 for (offset = 0; offset < (int)naddr ; /**/) {
7577 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7579 : ARRAY_SIZE(c.u.exact));
7580 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7581 u.exact[fw_naddr]), 16);
7582 struct fw_vi_mac_exact *p;
7585 memset(&c, 0, sizeof(c));
7586 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7590 FW_VI_MAC_CMD_VIID_V(viid));
7591 c.freemacs_to_len16 =
7592 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7593 FW_CMD_LEN16_V(len16));
/* MAC_BASED_FREE: firmware locates and frees the filter by address. */
7595 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
7596 p->valid_to_idx = cpu_to_be16(
7597 FW_VI_MAC_CMD_VALID_F |
7598 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
7599 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7602 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
/* An in-range index in the reply confirms that entry was freed.
 * NOTE(review): this loop compares i against fw_naddr without the
 * (int) cast used above — harmless but inconsistent.
 */
7606 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7607 u16 index = FW_VI_MAC_CMD_IDX_G(
7608 be16_to_cpu(p->valid_to_idx));
7610 if (index < max_naddr)
7624 * t4_change_mac - modifies the exact-match filter for a MAC address
7625 * @adap: the adapter
7626 * @mbox: mailbox to use for the FW command
7628 * @idx: index of existing filter for old value of MAC address, or -1
7629 * @addr: the new MAC address value
7630 * @persist: whether a new MAC allocation should be persistent
7631 * @add_smt: if true also add the address to the HW SMT
7633 * Modifies an exact-match filter and sets it to the new MAC address.
7634 * Note that in general it is not possible to modify the value of a given
7635 * filter so the generic way to modify an address filter is to free the one
7636 * being used by the old address value and allocate a new filter for the
7637 * new address value. @idx can be -1 if the address is a new addition.
7639 * Returns a negative error number or the index of the filter with the new
7642 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7643 int idx, const u8 *addr, bool persist, bool add_smt)
7646 struct fw_vi_mac_cmd c;
7647 struct fw_vi_mac_exact *p = c.u.exact;
7648 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
/* A negative @idx asks firmware to pick a slot; @persist selects a
 * persistent allocation. @add_smt additionally installs the address
 * in the Source MAC Table.
 */
7650 if (idx < 0) /* new allocation */
7651 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7652 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7654 memset(&c, 0, sizeof(c));
7655 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7656 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7657 FW_VI_MAC_CMD_VIID_V(viid));
7658 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
7659 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7660 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
7661 FW_VI_MAC_CMD_IDX_V(idx));
7662 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7664 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success the reply's IDX field holds the filter slot actually
 * used; an out-of-range value indicates no exact filter was assigned.
 */
7666 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7667 if (ret >= max_mac_addr)
7674 * t4_set_addr_hash - program the MAC inexact-match hash filter
7675 * @adap: the adapter
7676 * @mbox: mailbox to use for the FW command
7678 * @ucast: whether the hash filter should also match unicast addresses
7679 * @vec: the value to be written to the hash filter
7680 * @sleep_ok: call is allowed to sleep
7682 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7684 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7685 bool ucast, u64 vec, bool sleep_ok)
7687 struct fw_vi_mac_cmd c;
7689 memset(&c, 0, sizeof(c));
7690 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7691 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
/* NOTE(review): FW_VI_ENABLE_CMD_VIID_V is used to encode the VIID of
 * a FW_VI_MAC_CMD; presumably the two macros share the same field
 * layout — confirm against t4fw_api.h and consider using
 * FW_VI_MAC_CMD_VIID_V for consistency.
 */
7692 FW_VI_ENABLE_CMD_VIID_V(viid));
7693 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
7694 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
7696 c.u.hash.hashvec = cpu_to_be64(vec);
7697 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7701 * t4_enable_vi_params - enable/disable a virtual interface
7702 * @adap: the adapter
7703 * @mbox: mailbox to use for the FW command
7705 * @rx_en: 1=enable Rx, 0=disable Rx
7706 * @tx_en: 1=enable Tx, 0=disable Tx
7707 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7709 * Enables/disables a virtual interface. Note that setting DCB Enable
7710 * only makes sense when enabling a Virtual Interface ...
7712 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7713 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7715 struct fw_vi_enable_cmd c;
7717 memset(&c, 0, sizeof(c));
7718 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7719 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7720 FW_VI_ENABLE_CMD_VIID_V(viid));
7721 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
7722 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
7723 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
/* Issued via the non-sleeping mailbox variant (t4_wr_mbox_ns). */
7725 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7729 * t4_enable_vi - enable/disable a virtual interface
7730 * @adap: the adapter
7731 * @mbox: mailbox to use for the FW command
7733 * @rx_en: 1=enable Rx, 0=disable Rx
7734 * @tx_en: 1=enable Tx, 0=disable Tx
7736 * Enables/disables a virtual interface.
7738 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7739 bool rx_en, bool tx_en)
/* Wrapper around t4_enable_vi_params() with DCB delivery disabled. */
7741 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7745 * t4_identify_port - identify a VI's port by blinking its LED
7746 * @adap: the adapter
7747 * @mbox: mailbox to use for the FW command
7749 * @nblinks: how many times to blink LED at 2.5 Hz
7751 * Identifies a VI's port by blinking its LED.
7753 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7754 unsigned int nblinks)
7756 struct fw_vi_enable_cmd c;
/* Reuses FW_VI_ENABLE_CMD with the LED flag; blink count goes in the
 * blinkdur field.
 */
7758 memset(&c, 0, sizeof(c));
7759 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7760 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7761 FW_VI_ENABLE_CMD_VIID_V(viid));
7762 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
7763 c.blinkdur = cpu_to_be16(nblinks);
7764 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7768 * t4_iq_stop - stop an ingress queue and its FLs
7769 * @adap: the adapter
7770 * @mbox: mailbox to use for the FW command
7771 * @pf: the PF owning the queues
7772 * @vf: the VF owning the queues
7773 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7774 * @iqid: ingress queue id
7775 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7776 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7778 * Stops an ingress queue and its associated FLs, if any. This causes
7779 * any current or future data/messages destined for these queues to be
7782 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7783 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7784 unsigned int fl0id, unsigned int fl1id)
/* FW_IQ_CMD with IQSTOP: the queues remain allocated but are quiesced. */
7788 memset(&c, 0, sizeof(c));
7789 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7790 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7791 FW_IQ_CMD_VFN_V(vf));
7792 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
7793 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7794 c.iqid = cpu_to_be16(iqid);
7795 c.fl0id = cpu_to_be16(fl0id);
7796 c.fl1id = cpu_to_be16(fl1id);
7797 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7801 * t4_iq_free - free an ingress queue and its FLs
7802 * @adap: the adapter
7803 * @mbox: mailbox to use for the FW command
7804 * @pf: the PF owning the queues
7805 * @vf: the VF owning the queues
7806 * @iqtype: the ingress queue type
7807 * @iqid: ingress queue id
7808 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7809 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7811 * Frees an ingress queue and its associated FLs, if any.
7813 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7814 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7815 unsigned int fl0id, unsigned int fl1id)
/* Identical to t4_iq_stop() except the FREE flag releases the queues. */
7819 memset(&c, 0, sizeof(c));
7820 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7821 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7822 FW_IQ_CMD_VFN_V(vf));
7823 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
7824 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7825 c.iqid = cpu_to_be16(iqid);
7826 c.fl0id = cpu_to_be16(fl0id);
7827 c.fl1id = cpu_to_be16(fl1id);
7828 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7832 * t4_eth_eq_free - free an Ethernet egress queue
7833 * @adap: the adapter
7834 * @mbox: mailbox to use for the FW command
7835 * @pf: the PF owning the queue
7836 * @vf: the VF owning the queue
7837 * @eqid: egress queue id
7839 * Frees an Ethernet egress queue.
7841 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7842 unsigned int vf, unsigned int eqid)
7844 struct fw_eq_eth_cmd c;
/* FW_EQ_ETH_CMD with the FREE flag releases the egress queue. */
7846 memset(&c, 0, sizeof(c));
7847 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
7848 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7849 FW_EQ_ETH_CMD_PFN_V(pf) |
7850 FW_EQ_ETH_CMD_VFN_V(vf));
7851 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
7852 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
7853 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7857 * t4_ctrl_eq_free - free a control egress queue
7858 * @adap: the adapter
7859 * @mbox: mailbox to use for the FW command
7860 * @pf: the PF owning the queue
7861 * @vf: the VF owning the queue
7862 * @eqid: egress queue id
7864 * Frees a control egress queue.
7866 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7867 unsigned int vf, unsigned int eqid)
7869 struct fw_eq_ctrl_cmd c;
/* FW_EQ_CTRL_CMD with the FREE flag releases the control queue. */
7871 memset(&c, 0, sizeof(c));
7872 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
7873 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7874 FW_EQ_CTRL_CMD_PFN_V(pf) |
7875 FW_EQ_CTRL_CMD_VFN_V(vf));
7876 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
7877 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
7878 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7882 * t4_ofld_eq_free - free an offload egress queue
7883 * @adap: the adapter
7884 * @mbox: mailbox to use for the FW command
7885 * @pf: the PF owning the queue
7886 * @vf: the VF owning the queue
7887 * @eqid: egress queue id
7889 * Frees an offload egress queue.
7891 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7892 unsigned int vf, unsigned int eqid)
7894 struct fw_eq_ofld_cmd c;
/* FW_EQ_OFLD_CMD with the FREE flag releases the offload queue. */
7896 memset(&c, 0, sizeof(c));
7897 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
7898 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7899 FW_EQ_OFLD_CMD_PFN_V(pf) |
7900 FW_EQ_OFLD_CMD_VFN_V(vf));
7901 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
7902 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
7903 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7907 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7908 * @adap: the adapter
7909 * @link_down_rc: Link Down Reason Code
7911 * Returns a string representation of the Link Down Reason Code.
7913 static const char *t4_link_down_rc_str(unsigned char link_down_rc)
/* Table is indexed directly by the reason code; out-of-range codes
 * map to a generic "Bad Reason Code" string.
 */
7915 static const char * const reason[] = {
7918 "Auto-negotiation Failure",
7920 "Insufficient Airflow",
7921 "Unable To Determine Reason",
7922 "No RX Signal Detected",
7926 if (link_down_rc >= ARRAY_SIZE(reason))
7927 return "Bad Reason Code";
7929 return reason[link_down_rc];
7933 * Return the highest speed set in the port capabilities, in Mb/s.
7935 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
/* Probe speed capability bits from fastest to slowest and return the
 * first match in Mb/s; falls through past the list (returning 0 —
 * the final return is not visible here, TODO confirm) if no speed
 * bit is set.
 */
7937 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
7939 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
7943 TEST_SPEED_RETURN(400G, 400000);
7944 TEST_SPEED_RETURN(200G, 200000);
7945 TEST_SPEED_RETURN(100G, 100000);
7946 TEST_SPEED_RETURN(50G, 50000);
7947 TEST_SPEED_RETURN(40G, 40000);
7948 TEST_SPEED_RETURN(25G, 25000);
7949 TEST_SPEED_RETURN(10G, 10000);
7950 TEST_SPEED_RETURN(1G, 1000);
7951 TEST_SPEED_RETURN(100M, 100);
7953 #undef TEST_SPEED_RETURN
7959 * fwcap_to_fwspeed - return highest speed in Port Capabilities
7960 * @acaps: advertised Port Capabilities
7962 * Get the highest speed for the port from the advertised Port
7963 * Capabilities. It will be either the highest speed from the list of
7964 * speeds or whatever user has set using ethtool.
7966 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
/* Like fwcap_to_speed() but returns the raw FW_PORT_CAP32_SPEED_*
 * capability bit of the highest advertised speed rather than Mb/s.
 */
7968 #define TEST_SPEED_RETURN(__caps_speed) \
7970 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
7971 return FW_PORT_CAP32_SPEED_##__caps_speed; \
7974 TEST_SPEED_RETURN(400G);
7975 TEST_SPEED_RETURN(200G);
7976 TEST_SPEED_RETURN(100G);
7977 TEST_SPEED_RETURN(50G);
7978 TEST_SPEED_RETURN(40G);
7979 TEST_SPEED_RETURN(25G);
7980 TEST_SPEED_RETURN(10G);
7981 TEST_SPEED_RETURN(1G);
7982 TEST_SPEED_RETURN(100M);
7984 #undef TEST_SPEED_RETURN
7990 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
7991 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
7993 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
7994 * 32-bit Port Capabilities value.
7996 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
7998 fw_port_cap32_t linkattr = 0;
8000 /* Unfortunately the format of the Link Status in the old
8001 * 16-bit Port Information message isn't the same as the
8002 * 16-bit Port Capabilities bitfield used everywhere else ...
/* Map pause flags and each legacy LSPEED encoding onto the
 * corresponding FW_PORT_CAP32_* bit, one test per speed.
 */
8004 if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8005 linkattr |= FW_PORT_CAP32_FC_RX;
8006 if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8007 linkattr |= FW_PORT_CAP32_FC_TX;
8008 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8009 linkattr |= FW_PORT_CAP32_SPEED_100M;
8010 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8011 linkattr |= FW_PORT_CAP32_SPEED_1G;
8012 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8013 linkattr |= FW_PORT_CAP32_SPEED_10G;
8014 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8015 linkattr |= FW_PORT_CAP32_SPEED_25G;
8016 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8017 linkattr |= FW_PORT_CAP32_SPEED_40G;
8018 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8019 linkattr |= FW_PORT_CAP32_SPEED_100G;
8025 * t4_handle_get_port_info - process a FW reply message
8026 * @pi: the port info
8027 * @rpl: start of the FW message
8029 * Processes a GET_PORT_INFO FW reply message.
8031 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8033 const struct fw_port_cmd *cmd = (const void *)rpl;
8034 int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
8035 struct adapter *adapter = pi->adapter;
8036 struct link_config *lc = &pi->link_cfg;
8037 int link_ok, linkdnrc;
8038 enum fw_port_type port_type;
8039 enum fw_port_module_type mod_type;
8040 unsigned int speed, fc, fec;
8041 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8043 /* Extract the various fields from the Port Information message.
/* Old 16-bit message format: capabilities arrive as 16-bit fields
 * and are widened via fwcaps16_to_caps32(); link attributes are
 * synthesized from the legacy lstatus word.
 */
8046 case FW_PORT_ACTION_GET_PORT_INFO: {
8047 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8049 link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8050 linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8051 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8052 mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8053 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8054 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8055 lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8056 linkattr = lstatus_to_fwcap(lstatus);
/* New 32-bit message format: all fields are native 32-bit. */
8060 case FW_PORT_ACTION_GET_PORT_INFO32: {
8063 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8064 link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8065 linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8066 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8067 mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8068 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8069 acaps = be32_to_cpu(cmd->u.info32.acaps32);
8070 lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8071 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8076 dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8077 be32_to_cpu(cmd->action_to_len16));
/* Derive the canonical FEC, pause, and speed settings. */
8081 fec = fwcap_to_cc_fec(acaps);
8082 fc = fwcap_to_cc_pause(linkattr);
8083 speed = fwcap_to_speed(linkattr);
8085 if (mod_type != pi->mod_type) {
8086 /* With the newer SFP28 and QSFP28 Transceiver Module Types,
8087 * various fundamental Port Capabilities which used to be
8088 * immutable can now change radically. We can now have
8089 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8090 * all change based on what Transceiver Module is inserted.
8091 * So we need to record the Physical "Port" Capabilities on
8092 * every Transceiver Module change.
8096 /* When a new Transceiver Module is inserted, the Firmware
8097 * will examine its i2c EPROM to determine its type and
8098 * general operating parameters including things like Forward
8099 * Error Control, etc. Various IEEE 802.3 standards dictate
8100 * how to interpret these i2c values to determine default
8101 * "automatic" settings. We record these for future use when
8102 * the user explicitly requests these standards-based values.
8104 lc->def_acaps = acaps;
8106 /* Some versions of the early T6 Firmware "cheated" when
8107 * handling different Transceiver Modules by changing the
8108 * underlying Port Type reported to the Host Drivers. As
8109 * such we need to capture whatever Port Type the Firmware
8110 * sends us and record it in case it's different from what we
8111 * were told earlier. Unfortunately, since Firmware is
8112 * forever, we'll need to keep this code here forever, but in
8113 * later T6 Firmware it should just be an assignment of the
8114 * same value already recorded.
8116 pi->port_type = port_type;
8118 pi->mod_type = mod_type;
8119 t4_os_portmod_changed(adapter, pi->port_id);
8122 if (link_ok != lc->link_ok || speed != lc->speed ||
8123 fc != lc->fc || fec != lc->fec) { /* something changed */
8124 if (!link_ok && lc->link_ok) {
/* Link just went down: record and report why. */
8125 lc->link_down_rc = linkdnrc;
8126 dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
8127 pi->tx_chan, t4_link_down_rc_str(linkdnrc));
8129 lc->link_ok = link_ok;
8134 lc->lpacaps = lpacaps;
8135 lc->acaps = acaps & ADVERT_MASK;
8137 if (lc->acaps & FW_PORT_CAP32_ANEG) {
8138 lc->autoneg = AUTONEG_ENABLE;
8140 /* When Autoneg is disabled, user needs to set
8142 * Similar to cxgb4_ethtool.c: set_link_ksettings
8145 lc->speed_caps = fwcap_to_fwspeed(acaps);
8146 lc->autoneg = AUTONEG_DISABLE;
8149 t4_os_link_changed(adapter, pi->port_id, link_ok);
8154 * t4_update_port_info - retrieve and update port information if changed
8155 * @pi: the port_info
8157 * We issue a Get Port Information Command to the Firmware and, if
8158 * successful, we check to see if anything is different from what we
8159 * last recorded and update things accordingly.
8161 int t4_update_port_info(struct port_info *pi)
8163 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8164 struct fw_port_cmd port_cmd;
8167 memset(&port_cmd, 0, sizeof(port_cmd));
8168 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8169 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8170 FW_PORT_CMD_PORTID_V(pi->tx_chan));
/* Pick the 16-bit or 32-bit GET_PORT_INFO action to match the
 * firmware's supported capability format.
 */
8171 port_cmd.action_to_len16 = cpu_to_be32(
8172 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8173 ? FW_PORT_ACTION_GET_PORT_INFO
8174 : FW_PORT_ACTION_GET_PORT_INFO32) |
8175 FW_LEN16(port_cmd));
8176 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8177 &port_cmd, sizeof(port_cmd), &port_cmd);
/* Reuse the common reply handler to fold the reply into pi/link_cfg. */
8181 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8186 * t4_get_link_params - retrieve basic link parameters for given port
8188 * @link_okp: value return pointer for link up/down
8189 * @speedp: value return pointer for speed (Mb/s)
8190 * @mtup: value return pointer for mtu
8192 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8193 * and MTU for a specified port. A negative error is returned on
8194 * failure; 0 on success.
8196 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8197 unsigned int *speedp, unsigned int *mtup)
8199 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8200 struct fw_port_cmd port_cmd;
8201 unsigned int action, link_ok, speed, mtu;
8202 fw_port_cap32_t linkattr;
8205 memset(&port_cmd, 0, sizeof(port_cmd));
8206 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8207 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8208 FW_PORT_CMD_PORTID_V(pi->tx_chan));
/* Pick the firmware query form matching the negotiated capability level. */
8209 action = (fw_caps == FW_CAPS16
8210 ? FW_PORT_ACTION_GET_PORT_INFO
8211 : FW_PORT_ACTION_GET_PORT_INFO32);
8212 port_cmd.action_to_len16 = cpu_to_be32(
8213 FW_PORT_CMD_ACTION_V(action) |
8214 FW_LEN16(port_cmd));
8215 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8216 &port_cmd, sizeof(port_cmd), &port_cmd);
/* Decode the reply; layout differs between the 16- and 32-bit forms. */
8220 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8221 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8223 link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8224 linkattr = lstatus_to_fwcap(lstatus);
8225 mtu = be16_to_cpu(port_cmd.u.info.mtu);
8228 be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8230 link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8231 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8232 mtu = FW_PORT_CMD_MTU32_G(
8233 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8235 speed = fwcap_to_speed(linkattr);
/* NOTE(review): 'speed' was just computed above (orig. 8235) but the store
 * below recomputes fwcap_to_speed(linkattr) instead of using it; behavior
 * is identical but redundant. The '*mtup = mtu;' store falls in the dropped
 * lines (orig. 8239+) -- confirm against the full file before changing.
 */
8237 *link_okp = link_ok;
8238 *speedp = fwcap_to_speed(linkattr);
8245 * t4_handle_fw_rpl - process a FW reply message
8246 * @adap: the adapter
8247 * @rpl: start of the FW message
8249 * Processes a FW message, such as link state change messages.
8251 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8253 u8 opcode = *(const u8 *)rpl;
8255 /* This might be a port command ... this simplifies the following
8256 * conditionals ... We can get away with pre-dereferencing
8257 * action_to_len16 because it's in the first 16 bytes and all messages
8258 * will be at least that long.
8260 const struct fw_port_cmd *p = (const void *)rpl;
8261 unsigned int action =
8262 FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
/* Only asynchronous port-status updates (either capability form) are
 * handled here; anything else falls through to the warning below.
 */
8264 if (opcode == FW_PORT_CMD &&
8265 (action == FW_PORT_ACTION_GET_PORT_INFO ||
8266 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8268 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
8269 struct port_info *pi = NULL;
/* Map the message's Tx channel back to the owning port_info. */
8271 for_each_port(adap, i) {
8272 pi = adap2pinfo(adap, i);
8273 if (pi->tx_chan == chan)
8277 t4_handle_get_port_info(pi, rpl);
8279 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
/* Record the negotiated PCIe link speed and width from the Link Status
 * register into @p; leaves @p untouched on non-PCIe devices.
 * NOTE(review): the >> 4 extracts the Negotiated Link Width field
 * (PCI_EXP_LNKSTA_NLW starts at bit 4 per the PCIe spec).
 */
8286 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8290 if (pci_is_pcie(adapter->pdev)) {
8291 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8292 p->speed = val & PCI_EXP_LNKSTA_CLS;
8293 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8298 * init_link_config - initialize a link's SW state
8299 * @lc: pointer to structure holding the link state
8300 * @pcaps: link Port Capabilities
8301 * @acaps: link current Advertised Port Capabilities
8303 * Initializes the SW state maintained for each link, including the link's
8304 * capabilities and default speed/flow-control/autonegotiation settings.
8306 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8307 fw_port_cap32_t acaps)
8310 lc->def_acaps = acaps;
/* Default to symmetric Pause flow control. */
8314 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8316 /* For Forward Error Control, we default to whatever the Firmware
8317 * tells us the Link is currently advertising.
8319 lc->requested_fec = FEC_AUTO;
8320 lc->fec = fwcap_to_cc_fec(lc->def_acaps);
/* Enable autonegotiation only if the port is capable of it. */
8322 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8323 lc->acaps = lc->pcaps & ADVERT_MASK;
8324 lc->autoneg = AUTONEG_ENABLE;
8325 lc->requested_fc |= PAUSE_AUTONEG;
8328 lc->autoneg = AUTONEG_DISABLE;
/* Sentinel read back from PL_WHOAMI when the CIM refuses PF access. */
8332 #define CIM_PF_NOACCESS 0xeeeeeeee
/* Probe PL_WHOAMI to decide whether the device is responding: 0xffffffff
 * (bus error) and CIM_PF_NOACCESS both mean "not ready". Retries once;
 * the delay between the two reads falls in dropped lines (orig. 8340-8342)
 * -- presumably an msleep, TODO confirm against the full file.
 */
8334 int t4_wait_dev_ready(void __iomem *regs)
8338 whoami = readl(regs + PL_WHOAMI_A);
8339 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8343 whoami = readl(regs + PL_WHOAMI_A);
8344 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
/* Remnant of 'struct flash_desc' (its opening lines were dropped from this
 * listing); used by the supported_flash[] table below.
 */
8348 u32 vendor_and_model_id;
/* Identify the serial flash part via the Read ID command and record its
 * size and sector count in adap->params (sf_size / sf_nsec).
 */
8352 static int t4_get_flash_params(struct adapter *adap)
8354 /* Table for non-Numonix supported flash parts. Numonix parts are left
8355 * to the preexisting code. All flash parts have 64KB sectors.
8357 static struct flash_desc supported_flash[] = {
8358 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8361 unsigned int part, manufacturer;
8362 unsigned int density, size;
8366 /* Issue a Read ID Command to the Flash part. We decode supported
8367 * Flash parts and their sizes from this. There's a newer Query
8368 * Command which can retrieve detailed geometry information but many
8369 * Flash parts don't support it.
8372 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
8374 ret = sf1_read(adap, 3, 0, 1, &flashid);
8375 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
8379 /* Check to see if it's one of our non-standard supported Flash parts.
8381 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8382 if (supported_flash[part].vendor_and_model_id == flashid) {
8383 adap->params.sf_size = supported_flash[part].size_mb;
8384 adap->params.sf_nsec =
8385 adap->params.sf_size / SF_SEC_SIZE;
8389 /* Decode Flash part size. The code below looks repetitive with
8390 * common encodings, but that's not guaranteed in the JEDEC
8391 * specification for the Read JEDEC ID command. The only thing that
8392 * we're guaranteed by the JEDEC specification is where the
8393 * Manufacturer ID is in the returned result. After that each
8394 * Manufacturer ~could~ encode things completely differently.
8395 * Note, all Flash parts must have 64KB sectors.
8397 manufacturer = flashid & 0xff;
8398 switch (manufacturer) {
8399 case 0x20: { /* Micron/Numonix */
8400 /* This Density -> Size decoding table is taken from Micron
8403 density = (flashid >> 16) & 0xff;
8405 case 0x14: /* 1MB */
8408 case 0x15: /* 2MB */
8411 case 0x16: /* 4MB */
8414 case 0x17: /* 8MB */
8417 case 0x18: /* 16MB */
8420 case 0x19: /* 32MB */
8423 case 0x20: /* 64MB */
8426 case 0x21: /* 128MB */
8429 case 0x22: /* 256MB */
8434 dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
8440 case 0xc2: { /* Macronix */
8441 /* This Density -> Size decoding table is taken from Macronix
8444 density = (flashid >> 16) & 0xff;
8446 case 0x17: /* 8MB */
8449 case 0x18: /* 16MB */
8453 dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
8459 case 0xef: { /* Winbond */
8460 /* This Density -> Size decoding table is taken from Winbond
8463 density = (flashid >> 16) & 0xff;
8465 case 0x17: /* 8MB */
8468 case 0x18: /* 16MB */
8472 dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
8479 dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
8484 /* Store decoded Flash size and fall through into vetting code. */
8485 adap->params.sf_size = size;
8486 adap->params.sf_nsec = size / SF_SEC_SIZE;
/* Warn (but don't fail) if the part is smaller than the driver expects. */
8489 if (adap->params.sf_size < FLASH_MIN_SIZE)
8490 dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
8491 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
/* Program the PCIe Completion Timeout range in Device Control 2 to @range.
 * Read-modify-write of PCI_EXP_DEVCTL2; a no-op if the Express capability
 * is absent (pcie_cap == 0 guard falls in the dropped lines -- verify).
 */
8495 static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
8500 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
8502 pci_read_config_word(adapter->pdev,
8503 pcie_cap + PCI_EXP_DEVCTL2, &val);
8504 val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
8506 pci_write_config_word(adapter->pdev,
8507 pcie_cap + PCI_EXP_DEVCTL2, val);
8512 * t4_prep_adapter - prepare SW and HW for operation
8513 * @reset: if true perform a HW reset
/* NOTE(review): the kernel-doc above mentions @reset but the visible
 * prototype takes only @adapter -- stale documentation, confirm against
 * the full file.
 */
8514 * @adapter: the adapter
8516 * Initialize adapter SW state for the various HW modules, set initial
8517 * values for some adapter tunables, take PHYs out of reset, and
8518 * initialize the MDIO interface.
8520 int t4_prep_adapter(struct adapter *adapter)
8526 get_pci_mode(adapter, &adapter->params.pci);
8527 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
8529 ret = t4_get_flash_params(adapter);
8531 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
8535 /* Retrieve adapter's device ID
8537 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
/* Top nibble of the PCI device ID encodes the chip generation (4/5/6). */
8538 ver = device_id >> 12;
8539 adapter->params.chip = 0;
/* Per-generation architectural parameters (TCAM sizes, channel counts,
 * VF counts, doorbell capabilities) used throughout the driver.
 */
8542 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
8543 adapter->params.arch.sge_fl_db = DBPRIO_F;
8544 adapter->params.arch.mps_tcam_size =
8545 NUM_MPS_CLS_SRAM_L_INSTANCES;
8546 adapter->params.arch.mps_rplc_size = 128;
8547 adapter->params.arch.nchan = NCHAN;
8548 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
8549 adapter->params.arch.vfcount = 128;
8550 /* Congestion map is for 4 channels so that
8551 * MPS can have 4 priority per port.
8553 adapter->params.arch.cng_ch_bits_log = 2;
8556 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
8557 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
8558 adapter->params.arch.mps_tcam_size =
8559 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8560 adapter->params.arch.mps_rplc_size = 128;
8561 adapter->params.arch.nchan = NCHAN;
8562 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
8563 adapter->params.arch.vfcount = 128;
8564 adapter->params.arch.cng_ch_bits_log = 2;
8567 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
8568 adapter->params.arch.sge_fl_db = 0;
8569 adapter->params.arch.mps_tcam_size =
8570 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8571 adapter->params.arch.mps_rplc_size = 256;
8572 adapter->params.arch.nchan = 2;
8573 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
8574 adapter->params.arch.vfcount = 256;
8575 /* Congestion map will be for 2 channels so that
8576 * MPS can have 8 priority per port.
8578 adapter->params.arch.cng_ch_bits_log = 3;
8581 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
8586 adapter->params.cim_la_size = CIMLA_SIZE;
8587 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8590 * Default port for debugging in case we can't reach FW.
8592 adapter->params.nports = 1;
8593 adapter->params.portvec = 1;
8594 adapter->params.vpd.cclk = 50000;
8596 /* Set pci completion timeout value to 4 seconds. */
8597 set_pcie_completion_timeout(adapter, 0xd);
8602 * t4_shutdown_adapter - shut down adapter, host & wire
8603 * @adapter: the adapter
8605 * Perform an emergency shutdown of the adapter and stop it from
8606 * continuing any further communication on the ports or DMA to the
8607 * host. This is typically used when the adapter and/or firmware
8608 * have crashed and we want to prevent any further accidental
8609 * communication with the rest of the world. This will also force
8610 * the port Link Status to go down -- if register writes work --
8611 * which should help our peers figure out that we're down.
8613 int t4_shutdown_adapter(struct adapter *adapter)
8617 t4_intr_disable(adapter);
8618 t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
/* Drop Signal Detect on every MAC port to force the link down; T4 and
 * T5+ use different per-port register maps for the port config.
 */
8619 for_each_port(adapter, port) {
8620 u32 a_port_cfg = is_t4(adapter->params.chip) ?
8621 PORT_REG(port, XGMAC_PORT_CFG_A) :
8622 T5_PORT_REG(port, MAC_PORT_CFG_A);
8624 t4_write_reg(adapter, a_port_cfg,
8625 t4_read_reg(adapter, a_port_cfg)
8626 & ~SIGNAL_DET_V(1));
/* Finally disable the SGE entirely to stop all DMA to the host. */
8628 t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
8634 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
8635 * @adapter: the adapter
8636 * @qid: the Queue ID
8637 * @qtype: the Ingress or Egress type for @qid
8638 * @user: true if this request is for a user mode queue
8639 * @pbar2_qoffset: BAR2 Queue Offset
8640 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
8642 * Returns the BAR2 SGE Queue Registers information associated with the
8643 * indicated Absolute Queue ID. These are passed back in return value
8644 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
8645 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
8647 * This may return an error which indicates that BAR2 SGE Queue
8648 * registers aren't available. If an error is not returned, then the
8649 * following values are returned:
8651 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
8652 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
8654 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
8655 * require the "Inferred Queue ID" ability may be used. E.g. the
8656 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
8657 * then these "Inferred Queue ID" register may not be used.
8659 int t4_bar2_sge_qregs(struct adapter *adapter,
8661 enum t4_bar2_qtype qtype,
8664 unsigned int *pbar2_qid)
8666 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
8667 u64 bar2_page_offset, bar2_qoffset;
8668 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
8670 /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
8671 if (!user && is_t4(adapter->params.chip))
8674 /* Get our SGE Page Size parameters.
8676 page_shift = adapter->params.sge.hps + 10;
8677 page_size = 1 << page_shift;
8679 /* Get the right Queues per Page parameters for our Queue.
8681 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
8682 ? adapter->params.sge.eq_qpp
8683 : adapter->params.sge.iq_qpp);
8684 qpp_mask = (1 << qpp_shift) - 1;
8686 /* Calculate the basics of the BAR2 SGE Queue register area:
8687 * o The BAR2 page the Queue registers will be in.
8688 * o The BAR2 Queue ID.
8689 * o The BAR2 Queue ID Offset into the BAR2 page.
8691 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
8692 bar2_qid = qid & qpp_mask;
8693 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
8695 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
8696 * hardware will infer the Absolute Queue ID simply from the writes to
8697 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
8698 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
8699 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
8700 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
8701 * from the BAR2 Page and BAR2 Queue ID.
8703 * One important consequence of this is that some BAR2 SGE registers
8704 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
8705 * there. But other registers synthesize the SGE Queue ID purely
8706 * from the writes to the registers -- the Write Combined Doorbell
8707 * Buffer is a good example. These BAR2 SGE Registers are only
8708 * available for those BAR2 SGE Register areas where the SGE Absolute
8709 * Queue ID can be inferred from simple writes.
8711 bar2_qoffset = bar2_page_offset;
8712 bar2_qinferred = (bar2_qid_offset < page_size);
8713 if (bar2_qinferred) {
8714 bar2_qoffset += bar2_qid_offset;
8718 *pbar2_qoffset = bar2_qoffset;
8719 *pbar2_qid = bar2_qid;
8724 * t4_init_devlog_params - initialize adapter->params.devlog
8725 * @adap: the adapter
8727 * Initialize various fields of the adapter's Firmware Device Log
8728 * Parameters structure.
8730 int t4_init_devlog_params(struct adapter *adap)
8732 struct devlog_params *dparams = &adap->params.devlog;
8734 unsigned int devlog_meminfo;
8735 struct fw_devlog_cmd devlog_cmd;
8738 /* If we're dealing with newer firmware, the Device Log Parameters
8739 * are stored in a designated register which allows us to access the
8740 * Device Log even if we can't talk to the firmware.
8743 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
8745 unsigned int nentries, nentries128;
8747 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
/* Address is stored in 16-byte units; entry count in units of 128. */
8748 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
8750 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
8751 nentries = (nentries128 + 1) * 128;
8752 dparams->size = nentries * sizeof(struct fw_devlog_e);
8757 /* Otherwise, ask the firmware for its Device Log Parameters.
8759 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
8760 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
8761 FW_CMD_REQUEST_F | FW_CMD_READ_F);
8762 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
8763 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
8769 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
8770 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
8771 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
8772 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
8778 * t4_init_sge_params - initialize adap->params.sge
8779 * @adapter: the adapter
8781 * Initialize various fields of the adapter's SGE Parameters structure.
8783 int t4_init_sge_params(struct adapter *adapter)
8785 struct sge_params *sge_params = &adapter->params.sge;
8787 unsigned int s_hps, s_qpp;
8789 /* Extract the SGE Page Size for our PF.
8791 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
/* Each PF's field sits at a fixed stride from PF0's within the register. */
8792 s_hps = (HOSTPAGESIZEPF0_S +
8793 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
8794 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
8796 /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
8798 s_qpp = (QUEUESPERPAGEPF0_S +
8799 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
8800 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
8801 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
8802 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
8803 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
8809 * t4_init_tp_params - initialize adap->params.tp
8810 * @adap: the adapter
8811 * @sleep_ok: if true we may sleep while awaiting command completion
8813 * Initialize various fields of the adapter's TP Parameters structure.
8815 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
8820 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
8821 adap->params.tp.tre = TIMERRESOLUTION_G(v);
8822 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
8824 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
8825 for (chan = 0; chan < NCHAN; chan++)
8826 adap->params.tp.tx_modq[chan] = chan;
8828 /* Cache the adapter's Compressed Filter Mode and global Ingress
8831 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
8832 TP_VLAN_PRI_MAP_A, sleep_ok);
8833 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
8834 TP_INGRESS_CONFIG_A, sleep_ok);
8836 /* For T6, cache the adapter's compressed error vector
8837 * and passing outer header info for encapsulated packets.
8839 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
8840 v = t4_read_reg(adap, TP_OUT_CONFIG_A);
8841 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
8844 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
8845 * shift positions of several elements of the Compressed Filter Tuple
8846 * for this adapter which we need frequently ...
8848 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
8849 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
8850 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
8851 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
8852 adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
8853 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
8855 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
8857 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
8859 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
8861 adap->params.tp.frag_shift = t4_filter_field_shift(adap,
8864 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
8865 * represents the presence of an Outer VLAN instead of a VNIC ID.
8867 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
8868 adap->params.tp.vnic_shift = -1;
/* NOTE(review): the hash-filter mask reads below use T6 register names;
 * a guard (orig. line 8869, dropped from this listing) presumably limits
 * them to the appropriate chips -- confirm against the full file.
 */
8870 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
8871 adap->params.tp.hash_filter_mask = v;
8872 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
8873 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
8878 * t4_filter_field_shift - calculate filter field shift
8879 * @adap: the adapter
8880 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8882 * Return the shift position of a filter field within the Compressed
8883 * Filter Tuple. The filter field is specified via its selection bit
8884 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
8886 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8888 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* Field absent from the filter mode: no valid shift. */
8892 if ((filter_mode & filter_sel) == 0)
/* Sum the widths of every enabled field below @filter_sel to find its
 * starting bit within the Compressed Filter Tuple.
 */
8895 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8896 switch (filter_mode & sel) {
8898 field_shift += FT_FCOE_W;
8901 field_shift += FT_PORT_W;
8904 field_shift += FT_VNIC_ID_W;
8907 field_shift += FT_VLAN_W;
8910 field_shift += FT_TOS_W;
8913 field_shift += FT_PROTOCOL_W;
8916 field_shift += FT_ETHERTYPE_W;
8919 field_shift += FT_MACMATCH_W;
8922 field_shift += FT_MPSHITTYPE_W;
8924 case FRAGMENTATION_F:
8925 field_shift += FT_FRAGMENTATION_W;
/* Read each port's RSS VI configuration from the firmware and cache the
 * basic-virtual RSS mode in the corresponding port_info.
 */
8932 int t4_init_rss_mode(struct adapter *adap, int mbox)
8935 struct fw_rss_vi_config_cmd rvc;
8937 memset(&rvc, 0, sizeof(rvc));
8939 for_each_port(adap, i) {
8940 struct port_info *p = adap2pinfo(adap, i);
8943 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
8944 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8945 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
8946 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
8947 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
8950 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
8956 * t4_init_portinfo - allocate a virtual interface and initialize port_info
8957 * @pi: the port_info
8958 * @mbox: mailbox to use for the FW command
8959 * @port: physical port associated with the VI
8960 * @pf: the PF owning the VI
8961 * @vf: the VF owning the VI
8962 * @mac: the MAC address of the VI
8964 * Allocates a virtual interface for the given physical port. If @mac is
8965 * not %NULL it contains the MAC address of the VI as assigned by FW.
8966 * @mac should be large enough to hold an Ethernet address.
8967 * Returns < 0 on error.
8969 int t4_init_portinfo(struct port_info *pi, int mbox,
8970 int port, int pf, int vf, u8 mac[])
8972 struct adapter *adapter = pi->adapter;
8973 unsigned int fw_caps = adapter->params.fw_caps_support;
8974 struct fw_port_cmd cmd;
8975 unsigned int rss_size;
8976 enum fw_port_type port_type;
8978 fw_port_cap32_t pcaps, acaps;
8981 /* If we haven't yet determined whether we're talking to Firmware
8982 * which knows the new 32-bit Port Capabilities, it's time to find
8983 * out now. This will also tell new Firmware to send us Port Status
8984 * Updates using the new 32-bit Port Capabilities version of the
8985 * Port Information message.
8987 if (fw_caps == FW_CAPS_UNKNOWN) {
8990 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
8991 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
/* Success setting the param means the firmware speaks 32-bit caps. */
8993 ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
8994 fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
8995 adapter->params.fw_caps_support = fw_caps;
8998 memset(&cmd, 0, sizeof(cmd));
8999 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
9000 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9001 FW_PORT_CMD_PORTID_V(port));
9002 cmd.action_to_len16 = cpu_to_be32(
9003 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
9004 ? FW_PORT_ACTION_GET_PORT_INFO
9005 : FW_PORT_ACTION_GET_PORT_INFO32) |
9007 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
9011 /* Extract the various fields from the Port Information message.
9013 if (fw_caps == FW_CAPS16) {
9014 u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
9016 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
9017 mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
9018 ? FW_PORT_CMD_MDIOADDR_G(lstatus)
/* 16-bit capability words are widened to the 32-bit representation. */
9020 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
9021 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
9023 u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
9025 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
9026 mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
9027 ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
9029 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
9030 acaps = be32_to_cpu(cmd.u.info32.acaps32);
9033 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
9040 pi->rss_size = rss_size;
9042 pi->port_type = port_type;
9043 pi->mdio_addr = mdio_addr;
9044 pi->mod_type = FW_PORT_MOD_TYPE_NA;
9046 init_link_config(&pi->link_cfg, pcaps, acaps);
/* Initialize every port of the adapter: walk the firmware port vector,
 * pair each set bit (physical port j) with port_info i, allocate its VI
 * via t4_init_portinfo() and install the FW-assigned MAC address.
 */
9050 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9055 for_each_port(adap, i) {
9056 struct port_info *pi = adap2pinfo(adap, i);
/* Advance j to the next physical port present in portvec. */
9058 while ((adap->params.portvec & (1 << j)) == 0)
9061 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9065 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
9072 * t4_read_cimq_cfg - read CIM queue configuration
9073 * @adap: the adapter
9074 * @base: holds the queue base addresses in bytes
9075 * @size: holds the queue sizes in bytes
9076 * @thres: holds the queue full thresholds in bytes
9078 * Returns the current configuration of the CIM queues, starting with
9079 * the IBQs, then the OBQs.
9081 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
/* T5+ has more outbound queues than T4. */
9084 int cim_num_obq = is_t4(adap->params.chip) ?
9085 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9087 for (i = 0; i < CIM_NUM_IBQ; i++) {
9088 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9090 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9091 /* value is in 256-byte units */
9092 *base++ = CIMQBASE_G(v) * 256;
9093 *size++ = CIMQSIZE_G(v) * 256;
9094 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
/* OBQs have no full threshold -- only base and size are reported. */
9096 for (i = 0; i < cim_num_obq; i++) {
9097 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9099 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9100 /* value is in 256-byte units */
9101 *base++ = CIMQBASE_G(v) * 256;
9102 *size++ = CIMQSIZE_G(v) * 256;
9107 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9108 * @adap: the adapter
9109 * @qid: the queue index
9110 * @data: where to store the queue contents
9111 * @n: capacity of @data in 32-bit words
9113 * Reads the contents of the selected CIM queue starting at address 0 up
9114 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9115 * error and the number of 32-bit words actually read on success.
9117 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9119 int i, err, attempts;
9121 const unsigned int nwords = CIM_IBQ_SIZE * 4;
/* Only IBQs 0-5 exist; reads must come in 4-word multiples. */
9123 if (qid > 5 || (n & 3))
9126 addr = qid * nwords;
9130 /* It might take 3-10ms before the IBQ debug read access is allowed.
9131 * Wait for 1 Sec with a delay of 1 usec.
9135 for (i = 0; i < n; i++, addr++) {
9136 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
/* Poll for the debug-read busy bit to clear before fetching the word. */
9138 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9142 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9144 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
9149 * t4_read_cim_obq - read the contents of a CIM outbound queue
9150 * @adap: the adapter
9151 * @qid: the queue index
9152 * @data: where to store the queue contents
9153 * @n: capacity of @data in 32-bit words
9155 * Reads the contents of the selected CIM queue starting at address 0 up
9156 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9157 * error and the number of 32-bit words actually read on success.
9159 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9162 unsigned int addr, v, nwords;
9163 int cim_num_obq = is_t4(adap->params.chip) ?
9164 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9166 if ((qid > (cim_num_obq - 1)) || (n & 3))
/* Look up this OBQ's base and size from the CIM queue configuration. */
9169 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9170 QUENUMSELECT_V(qid));
9171 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9173 addr = CIMQBASE_G(v) * 64; /* multiple of 256 -> multiple of 4 */
9174 nwords = CIMQSIZE_G(v) * 64; /* same */
9178 for (i = 0; i < n; i++, addr++) {
9179 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9181 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9185 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9187 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
9192 * t4_cim_read - read a block from CIM internal address space
9193 * @adap: the adapter
9194 * @addr: the start address within the CIM address space
9195 * @n: number of words to read
9196 * @valp: where to store the result
9198 * Reads a block of 4-byte words from the CIM internal address space.
9200 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
/* Refuse to start if a previous host access is still in flight. */
9205 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9208 for ( ; !ret && n--; addr += 4) {
9209 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
/* Wait for HOSTBUSY to clear, then latch the data word. */
9210 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9213 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9219 * t4_cim_write - write a block into CIM internal address space
9220 * @adap: the adapter
9221 * @addr: the start address within the CIM address space
9222 * @n: number of words to write
9223 * @valp: set of values to write
9225 * Writes a block of 4-byte words into the CIM internal address space.
9227 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9228 const unsigned int *valp)
/* Refuse to start if a previous host access is still in flight. */
9232 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
/* Data must be staged before the control write that triggers the access. */
9235 for ( ; !ret && n--; addr += 4) {
9236 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9237 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9238 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
/* Convenience wrapper: write a single word into CIM internal space. */
9244 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
9247 return t4_cim_write(adap, addr, 1, &val);
9251 * t4_cim_read_la - read CIM LA capture buffer
9252 * @adap: the adapter
9253 * @la_buf: where to store the LA data
9254 * @wrptr: the HW write pointer within the capture buffer
9256 * Reads the contents of the CIM LA buffer with the most recent entry at
9257 * the end of the returned data and with the entry at @wrptr first.
9258 * We try to leave the LA in the running state we find it in.
9260 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9263 unsigned int cfg, val, idx;
9265 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
9269 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
9270 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
9275 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9279 idx = UPDBGLAWRPTR_G(val);
/* Walk the whole capture buffer one entry at a time, starting at the
 * hardware write pointer so entries come out oldest-first.
 */
9283 for (i = 0; i < adap->params.cim_la_size; i++) {
9284 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9285 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
9288 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9291 if (val & UPDBGLARDEN_F) {
9295 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
9299 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9300 * identify the 32-bit portion of the full 312-bit data
9302 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
9303 idx = (idx & 0xff0) + 0x10;
9306 /* address can't exceed 0xfff */
9307 idx &= UPDBGLARDPTR_M;
/* Restore the LA to running if that's how we found it. */
9310 if (cfg & UPDBGLAEN_F) {
9311 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9312 cfg & ~UPDBGLARDEN_F);
9320 * t4_tp_read_la - read TP LA capture buffer
9321 * @adap: the adapter
9322 * @la_buf: where to store the LA data
9323 * @wrptr: the HW write pointer within the capture buffer
9325 * Reads the contents of the TP LA buffer with the most recent entry at
9326 * the end of the returned data and with the entry at @wrptr first.
9327 * We leave the LA in the running state we find it in.
9329 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
9331 bool last_incomplete;
9332 unsigned int i, cfg, val, idx;
9334 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
9335 if (cfg & DBGLAENABLE_F) /* freeze LA */
9336 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9337 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F))
/* In capture modes >= 2 a half-written entry may sit at the write
 * pointer; skip past it and wipe it at the end.
 */
9339 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
9340 idx = DBGLAWPTR_G(val);
9341 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
9342 if (last_incomplete)
9343 idx = (idx + 1) & DBGLARPTR_M;
9348 val &= ~DBGLARPTR_V(DBGLARPTR_M);
9349 val |= adap->params.tp.la_mask;
9351 for (i = 0; i < TPLA_SIZE; i++) {
9352 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
9353 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
9354 idx = (idx + 1) & DBGLARPTR_M;
9357 /* Wipe out last entry if it isn't valid */
9358 if (last_incomplete)
9359 la_buf[TPLA_SIZE - 1] = ~0ULL;
9361 if (cfg & DBGLAENABLE_F) /* restore running state */
9362 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9363 cfg | adap->params.tp.la_mask);
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1		/* seconds in same state before warning */
#define SGE_IDMA_WARN_REPEAT 300	/* seconds between repeated warnings */
9377 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9378 * @adapter: the adapter
9379 * @idma: the adapter IDMA Monitor state
9381 * Initialize the state of an SGE Ingress DMA Monitor.
9383 void t4_idma_monitor_init(struct adapter *adapter,
9384 struct sge_idma_monitor_state *idma)
9386 /* Initialize the state variables for detecting an SGE Ingress DMA
9387 * hang. The SGE has internal counters which count up on each clock
9388 * tick whenever the SGE finds its Ingress DMA State Engines in the
9389 * same state they were on the previous clock tick. The clock used is
9390 * the Core Clock so we have a limit on the maximum "time" they can
9391 * record; typically a very small number of seconds. For instance,
9392 * with a 600MHz Core Clock, we can only count up to a bit more than
9393 * 7s. So we'll synthesize a larger counter in order to not run the
9394 * risk of having the "timers" overflow and give us the flexibility to
9395 * maintain a Hung SGE State Machine of our own which operates across
9396 * a longer time frame.
9398 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
9399 idma->idma_stalled[0] = 0;
9400 idma->idma_stalled[1] = 0;
9404 * t4_idma_monitor - monitor SGE Ingress DMA state
9405 * @adapter: the adapter
9406 * @idma: the adapter IDMA Monitor state
9407 * @hz: number of ticks/second
9408 * @ticks: number of ticks since the last IDMA Monitor call
9410 void t4_idma_monitor(struct adapter *adapter,
9411 struct sge_idma_monitor_state *idma,
9414 int i, idma_same_state_cnt[2];
9416 /* Read the SGE Debug Ingress DMA Same State Count registers. These
9417 * are counters inside the SGE which count up on each clock when the
9418 * SGE finds its Ingress DMA State Engines in the same states they
9419 * were in the previous clock. The counters will peg out at
9420 * 0xffffffff without wrapping around so once they pass the 1s
9421 * threshold they'll stay above that till the IDMA state changes.
9423 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
9424 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
9425 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9427 for (i = 0; i < 2; i++) {
9428 u32 debug0, debug11;
9430 /* If the Ingress DMA Same State Counter ("timer") is less
9431 * than 1s, then we can reset our synthesized Stall Timer and
9432 * continue. If we have previously emitted warnings about a
9433 * potential stalled Ingress Queue, issue a note indicating
9434 * that the Ingress Queue has resumed forward progress.
9436 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
9437 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
9438 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
9439 "resumed after %d seconds\n",
9440 i, idma->idma_qid[i],
9441 idma->idma_stalled[i] / hz);
9442 idma->idma_stalled[i] = 0;
9446 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
9447 * domain. The first time we get here it'll be because we
9448 * passed the 1s Threshold; each additional time it'll be
9449 * because the RX Timer Callback is being fired on its regular
9452 * If the stall is below our Potential Hung Ingress Queue
9453 * Warning Threshold, continue.
9455 if (idma->idma_stalled[i] == 0) {
9456 idma->idma_stalled[i] = hz;
9457 idma->idma_warn[i] = 0;
9459 idma->idma_stalled[i] += ticks;
9460 idma->idma_warn[i] -= ticks;
9463 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
9466 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
9468 if (idma->idma_warn[i] > 0)
9470 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
9472 /* Read and save the SGE IDMA State and Queue ID information.
9473 * We do this every time in case it changes across time ...
9474 * can't be too careful ...
9476 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
9477 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9478 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
9480 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
9481 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9482 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
9484 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
9485 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
9486 i, idma->idma_qid[i], idma->idma_state[i],
9487 idma->idma_stalled[i] / hz,
9489 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
9494 * t4_load_cfg - download config file
9495 * @adap: the adapter
9496 * @cfg_data: the cfg text file to write
9497 * @size: text file size
9499 * Write the supplied config text file to the card's serial flash.
9501 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9503 int ret, i, n, cfg_addr;
9505 unsigned int flash_cfg_start_sec;
9506 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9508 cfg_addr = t4_flash_cfg_addr(adap);
9513 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9515 if (size > FLASH_CFG_MAX_SIZE) {
9516 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
9517 FLASH_CFG_MAX_SIZE);
9521 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
9523 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9524 flash_cfg_start_sec + i - 1);
9525 /* If size == 0 then we're simply erasing the FLASH sectors associated
9526 * with the on-adapter Firmware Configuration File.
9528 if (ret || size == 0)
9531 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9532 for (i = 0; i < size; i += SF_PAGE_SIZE) {
9533 if ((size - i) < SF_PAGE_SIZE)
9537 ret = t4_write_flash(adap, addr, n, cfg_data);
9541 addr += SF_PAGE_SIZE;
9542 cfg_data += SF_PAGE_SIZE;
9547 dev_err(adap->pdev_dev, "config file %s failed %d\n",
9548 (size == 0 ? "clear" : "download"), ret);
9553 * t4_set_vf_mac - Set MAC address for the specified VF
9554 * @adapter: The adapter
9555 * @vf: one of the VFs instantiated by the specified PF
9556 * @naddr: the number of MAC addresses
9557 * @addr: the MAC address(es) to be set to the specified VF
9559 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
9560 unsigned int naddr, u8 *addr)
9562 struct fw_acl_mac_cmd cmd;
9564 memset(&cmd, 0, sizeof(cmd));
9565 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
9568 FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
9569 FW_ACL_MAC_CMD_VFN_V(vf));
9571 /* Note: Do not enable the ACL */
9572 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
9575 switch (adapter->pf) {
9577 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
9580 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
9583 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
9586 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
9590 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
9594 * t4_read_pace_tbl - read the pace table
9595 * @adap: the adapter
9596 * @pace_vals: holds the returned values
9598 * Returns the values of TP's pace table in microseconds.
9600 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
9604 for (i = 0; i < NTX_SCHED; i++) {
9605 t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
9606 v = t4_read_reg(adap, TP_PACE_TABLE_A);
9607 pace_vals[i] = dack_ticks_to_usec(adap, v);
9612 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
9613 * @adap: the adapter
9614 * @sched: the scheduler index
9615 * @kbps: the byte rate in Kbps
9616 * @ipg: the interpacket delay in tenths of nanoseconds
9617 * @sleep_ok: if true we may sleep while awaiting command completion
9619 * Return the current configuration of a HW Tx scheduler.
9621 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
9622 unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
9624 unsigned int v, addr, bpt, cpt;
9627 addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
9628 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9631 bpt = (v >> 8) & 0xff;
9634 *kbps = 0; /* scheduler disabled */
9636 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
9637 *kbps = (v * bpt) / 125;
9641 addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
9642 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9646 *ipg = (10000 * v) / core_ticks_per_usec(adap);
9650 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9651 int rateunit, int ratemode, int channel, int class,
9652 int minrate, int maxrate, int weight, int pktsize)
9654 struct fw_sched_cmd cmd;
9656 memset(&cmd, 0, sizeof(cmd));
9657 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
9660 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9662 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9663 cmd.u.params.type = type;
9664 cmd.u.params.level = level;
9665 cmd.u.params.mode = mode;
9666 cmd.u.params.ch = channel;
9667 cmd.u.params.cl = class;
9668 cmd.u.params.unit = rateunit;
9669 cmd.u.params.rate = ratemode;
9670 cmd.u.params.min = cpu_to_be32(minrate);
9671 cmd.u.params.max = cpu_to_be32(maxrate);
9672 cmd.u.params.weight = cpu_to_be16(weight);
9673 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9675 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),