2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/delay.h>
38 #include "t4_values.h"
40 #include "t4fw_version.h"
43 * t4_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time
52 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise.
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 int polarity, int attempts, int delay, u32 *valp)
61 u32 val = t4_read_reg(adapter, reg);
63 if (!!(val & mask) == polarity) {
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 int polarity, int attempts, int delay)
78 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
83 * t4_set_reg_field - set a register field to a value
84 * @adapter: the adapter to program
85 * @addr: the register address
86 * @mask: specifies the portion of the register to modify
87 * @val: the new value for the register field
89 * Sets a register field specified by the supplied mask to the
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
95 u32 v = t4_read_reg(adapter, addr) & ~mask;
97 t4_write_reg(adapter, addr, v | val);
98 (void) t4_read_reg(adapter, addr); /* flush */
102 * t4_read_indirect - read indirectly addressed registers
104 * @addr_reg: register holding the indirect address
105 * @data_reg: register holding the value of the indirect register
106 * @vals: where the read register values are stored
107 * @nregs: how many indirect registers to read
108 * @start_idx: index of first indirect register to read
110 * Reads registers that are accessed indirectly through an address/data
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 unsigned int data_reg, u32 *vals,
115 unsigned int nregs, unsigned int start_idx)
118 t4_write_reg(adap, addr_reg, start_idx);
119 *vals++ = t4_read_reg(adap, data_reg);
125 * t4_write_indirect - write indirectly addressed registers
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism. This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses.
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
154 u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
156 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
161 if (is_t4(adap->params.chip))
164 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
167 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168 * Configuration Space read. (None of the other fields matter when
169 * ENABLE is 0 so a simple register write is easier than a
170 * read-modify-write via t4_set_reg_field().)
172 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
176 * t4_report_fw_error - report firmware error
179 * The adapter firmware can indicate error conditions to the host.
180 * If the firmware has indicated an error, print out the reason for
181 * the firmware error.
183 static void t4_report_fw_error(struct adapter *adap)
185 static const char *const reason[] = {
186 "Crash", /* PCIE_FW_EVAL_CRASH */
187 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
188 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
189 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
190 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
192 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193 "Reserved", /* reserved */
197 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198 if (pcie_fw & PCIE_FW_ERR_F)
199 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200 reason[PCIE_FW_EVAL_G(pcie_fw)]);
204 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
206 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
209 for ( ; nflit; nflit--, mbox_addr += 8)
210 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
214 * Handle a FW assertion reported in a mailbox.
216 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
218 struct fw_debug_cmd asrt;
220 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
221 dev_alert(adap->pdev_dev,
222 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
223 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
224 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
228 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
229 * @adapter: the adapter
230 * @cmd: the Firmware Mailbox Command or Reply
231 * @size: command length in bytes
232 * @access: the time (ms) needed to access the Firmware Mailbox
233 * @execute: the time (ms) the command spent being executed
235 static void t4_record_mbox(struct adapter *adapter,
236 const __be64 *cmd, unsigned int size,
237 int access, int execute)
239 struct mbox_cmd_log *log = adapter->mbox_log;
240 struct mbox_cmd *entry;
243 entry = mbox_cmd_log_entry(log, log->cursor++);
244 if (log->cursor == log->size)
247 for (i = 0; i < size / 8; i++)
248 entry->cmd[i] = be64_to_cpu(cmd[i]);
249 while (i < MBOX_LEN / 8)
251 entry->timestamp = jiffies;
252 entry->seqno = log->seqno++;
253 entry->access = access;
254 entry->execute = execute;
258 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
260 * @mbox: index of the mailbox to use
261 * @cmd: the command to write
262 * @size: command length in bytes
263 * @rpl: where to optionally store the reply
264 * @sleep_ok: if true we may sleep while awaiting command completion
265 * @timeout: time to wait for command to finish before timing out
267 * Sends the given command to FW through the selected mailbox and waits
268 * for the FW to execute the command. If @rpl is not %NULL it is used to
269 * store the FW's reply to the command. The command and its optional
270 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
271 * to respond. @sleep_ok determines whether we may sleep while awaiting
272 * the response. If sleeping is allowed we use progressive backoff
275 * The return value is 0 on success or a negative errno on failure. A
276 * failure can happen either because we are not able to execute the
277 * command or FW executes it but signals an error. In the latter case
278 * the return value is the error code indicated by FW (negated).
280 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
281 int size, void *rpl, bool sleep_ok, int timeout)
283 static const int delay[] = {
284 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
287 struct mbox_list entry;
292 int i, ms, delay_idx, ret;
293 const __be64 *p = cmd;
294 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
295 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
296 __be64 cmd_rpl[MBOX_LEN / 8];
299 if ((size & 15) || size > MBOX_LEN)
303 * If the device is off-line, as in EEH, commands will time out.
304 * Fail them early so we don't waste time waiting.
306 if (adap->pdev->error_state != pci_channel_io_normal)
309 /* If we have a negative timeout, that implies that we can't sleep. */
315 /* Queue ourselves onto the mailbox access list. When our entry is at
316 * the front of the list, we have rights to access the mailbox. So we
317 * wait [for a while] till we're at the front [or bail out with an
320 spin_lock(&adap->mbox_lock);
321 list_add_tail(&entry.list, &adap->mlist.list);
322 spin_unlock(&adap->mbox_lock);
327 for (i = 0; ; i += ms) {
328 /* If we've waited too long, return a busy indication. This
329 * really ought to be based on our initial position in the
330 * mailbox access list but this is a start. We very rearely
331 * contend on access to the mailbox ...
333 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
334 if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
335 spin_lock(&adap->mbox_lock);
336 list_del(&entry.list);
337 spin_unlock(&adap->mbox_lock);
338 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
339 t4_record_mbox(adap, cmd, size, access, ret);
343 /* If we're at the head, break out and start the mailbox
346 if (list_first_entry(&adap->mlist.list, struct mbox_list,
350 /* Delay for a bit before checking again ... */
352 ms = delay[delay_idx]; /* last element may repeat */
353 if (delay_idx < ARRAY_SIZE(delay) - 1)
361 /* Loop trying to get ownership of the mailbox. Return an error
362 * if we can't gain ownership.
364 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
365 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
366 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
367 if (v != MBOX_OWNER_DRV) {
368 spin_lock(&adap->mbox_lock);
369 list_del(&entry.list);
370 spin_unlock(&adap->mbox_lock);
371 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
372 t4_record_mbox(adap, cmd, size, access, ret);
376 /* Copy in the new mailbox command and send it on its way ... */
377 t4_record_mbox(adap, cmd, size, access, 0);
378 for (i = 0; i < size; i += 8)
379 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
381 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
382 t4_read_reg(adap, ctl_reg); /* flush write */
388 !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
392 ms = delay[delay_idx]; /* last element may repeat */
393 if (delay_idx < ARRAY_SIZE(delay) - 1)
399 v = t4_read_reg(adap, ctl_reg);
400 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
401 if (!(v & MBMSGVALID_F)) {
402 t4_write_reg(adap, ctl_reg, 0);
406 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
407 res = be64_to_cpu(cmd_rpl[0]);
409 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
410 fw_asrt(adap, data_reg);
411 res = FW_CMD_RETVAL_V(EIO);
413 memcpy(rpl, cmd_rpl, size);
416 t4_write_reg(adap, ctl_reg, 0);
419 t4_record_mbox(adap, cmd_rpl,
420 MBOX_LEN, access, execute);
421 spin_lock(&adap->mbox_lock);
422 list_del(&entry.list);
423 spin_unlock(&adap->mbox_lock);
424 return -FW_CMD_RETVAL_G((int)res);
428 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
429 t4_record_mbox(adap, cmd, size, access, ret);
430 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
431 *(const u8 *)cmd, mbox);
432 t4_report_fw_error(adap);
433 spin_lock(&adap->mbox_lock);
434 list_del(&entry.list);
435 spin_unlock(&adap->mbox_lock);
440 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
441 void *rpl, bool sleep_ok)
443 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
447 static int t4_edc_err_read(struct adapter *adap, int idx)
449 u32 edc_ecc_err_addr_reg;
452 if (is_t4(adap->params.chip)) {
453 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
456 if (idx != 0 && idx != 1) {
457 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
461 edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
462 rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
465 "edc%d err addr 0x%x: 0x%x.\n",
466 idx, edc_ecc_err_addr_reg,
467 t4_read_reg(adap, edc_ecc_err_addr_reg));
469 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
471 (unsigned long long)t4_read_reg64(adap, rdata_reg),
472 (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
473 (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
474 (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
475 (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
476 (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
477 (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
478 (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
479 (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
485 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
487 * @win: PCI-E Memory Window to use
488 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
489 * @addr: address within indicated memory type
490 * @len: amount of memory to transfer
491 * @hbuf: host memory buffer
492 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
494 * Reads/writes an [almost] arbitrary memory region in the firmware: the
495 * firmware memory address and host buffer must be aligned on 32-bit
496 * boudaries; the length may be arbitrary. The memory is transferred as
497 * a raw byte sequence from/to the firmware's memory. If this memory
498 * contains data structures which contain multi-byte integers, it's the
499 * caller's responsibility to perform appropriate byte order conversions.
501 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
502 u32 len, void *hbuf, int dir)
504 u32 pos, offset, resid, memoffset;
505 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
508 /* Argument sanity checks ...
510 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
514 /* It's convenient to be able to handle lengths which aren't a
515 * multiple of 32-bits because we often end up transferring files to
516 * the firmware. So we'll handle that by normalizing the length here
517 * and then handling any residual transfer at the end.
522 /* Offset into the region of memory which is being accessed
525 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
526 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
529 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
530 if (mtype == MEM_HMA) {
531 memoffset = 2 * (edc_size * 1024 * 1024);
532 } else if (mtype != MEM_MC1) {
533 memoffset = (mtype * (edc_size * 1024 * 1024));
535 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
536 MA_EXT_MEMORY0_BAR_A));
537 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
540 /* Determine the PCIE_MEM_ACCESS_OFFSET */
541 addr = addr + memoffset;
543 /* Each PCI-E Memory Window is programmed with a window size -- or
544 * "aperture" -- which controls the granularity of its mapping onto
545 * adapter memory. We need to grab that aperture in order to know
546 * how to use the specified window. The window is also programmed
547 * with the base address of the Memory Window in BAR0's address
548 * space. For T4 this is an absolute PCI-E Bus Address. For T5
549 * the address is relative to BAR0.
551 mem_reg = t4_read_reg(adap,
552 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
554 mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
555 mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
556 if (is_t4(adap->params.chip))
557 mem_base -= adap->t4_bar0;
558 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
560 /* Calculate our initial PCI-E Memory Window Position and Offset into
563 pos = addr & ~(mem_aperture-1);
566 /* Set up initial PCI-E Memory Window to cover the start of our
567 * transfer. (Read it back to ensure that changes propagate before we
568 * attempt to use the new value.)
571 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
574 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
576 /* Transfer data to/from the adapter as long as there's an integral
577 * number of 32-bit transfers to complete.
579 * A note on Endianness issues:
581 * The "register" reads and writes below from/to the PCI-E Memory
582 * Window invoke the standard adapter Big-Endian to PCI-E Link
583 * Little-Endian "swizzel." As a result, if we have the following
584 * data in adapter memory:
586 * Memory: ... | b0 | b1 | b2 | b3 | ...
587 * Address: i+0 i+1 i+2 i+3
589 * Then a read of the adapter memory via the PCI-E Memory Window
594 * [ b3 | b2 | b1 | b0 ]
596 * If this value is stored into local memory on a Little-Endian system
597 * it will show up correctly in local memory as:
599 * ( ..., b0, b1, b2, b3, ... )
601 * But on a Big-Endian system, the store will show up in memory
602 * incorrectly swizzled as:
604 * ( ..., b3, b2, b1, b0, ... )
606 * So we need to account for this in the reads and writes to the
607 * PCI-E Memory Window below by undoing the register read/write
611 if (dir == T4_MEMORY_READ)
612 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
615 t4_write_reg(adap, mem_base + offset,
616 (__force u32)cpu_to_le32(*buf++));
617 offset += sizeof(__be32);
618 len -= sizeof(__be32);
620 /* If we've reached the end of our current window aperture,
621 * move the PCI-E Memory Window on to the next. Note that
622 * doing this here after "len" may be 0 allows us to set up
623 * the PCI-E Memory Window for a possible final residual
626 if (offset == mem_aperture) {
630 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
633 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
638 /* If the original transfer had a length which wasn't a multiple of
639 * 32-bits, now's where we need to finish off the transfer of the
640 * residual amount. The PCI-E Memory Window has already been moved
641 * above (if necessary) to cover this final transfer.
651 if (dir == T4_MEMORY_READ) {
652 last.word = le32_to_cpu(
653 (__force __le32)t4_read_reg(adap,
655 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
656 bp[i] = last.byte[i];
659 for (i = resid; i < 4; i++)
661 t4_write_reg(adap, mem_base + offset,
662 (__force u32)cpu_to_le32(last.word));
669 /* Return the specified PCI-E Configuration Space register from our Physical
670 * Function. We try first via a Firmware LDST Command since we prefer to let
671 * the firmware own all of these registers, but if that fails we go for it
672 * directly ourselves.
674 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
676 u32 val, ldst_addrspace;
678 /* If fw_attach != 0, construct and send the Firmware LDST Command to
679 * retrieve the specified PCI-E Configuration Space register.
681 struct fw_ldst_cmd ldst_cmd;
684 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
685 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
686 ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
690 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
691 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
692 ldst_cmd.u.pcie.ctrl_to_fn =
693 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
694 ldst_cmd.u.pcie.r = reg;
696 /* If the LDST Command succeeds, return the result, otherwise
697 * fall through to reading it directly ourselves ...
699 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
702 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
704 /* Read the desired Configuration Space register via the PCI-E
705 * Backdoor mechanism.
707 t4_hw_pci_read_cfg4(adap, reg, &val);
711 /* Get the window based on base passed to it.
712 * Window aperture is currently unhandled, but there is no use case for it
715 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
720 if (is_t4(adap->params.chip)) {
723 /* Truncation intentional: we only read the bottom 32-bits of
724 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
725 * mechanism to read BAR0 instead of using
726 * pci_resource_start() because we could be operating from
727 * within a Virtual Machine which is trapping our accesses to
728 * our Configuration Space and we need to set up the PCI-E
729 * Memory Window decoders with the actual addresses which will
730 * be coming across the PCI-E link.
732 bar0 = t4_read_pcie_cfg4(adap, pci_base);
734 adap->t4_bar0 = bar0;
736 ret = bar0 + memwin_base;
738 /* For T5, only relative offset inside the PCIe BAR is passed */
744 /* Get the default utility window (win0) used by everyone */
745 u32 t4_get_util_window(struct adapter *adap)
747 return t4_get_window(adap, PCI_BASE_ADDRESS_0,
748 PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
751 /* Set up memory window for accessing adapter memory ranges. (Read
752 * back MA register to ensure that changes propagate before we attempt
753 * to use the new values.)
755 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
758 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
759 memwin_base | BIR_V(0) |
760 WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
762 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
766 * t4_get_regs_len - return the size of the chips register set
767 * @adapter: the adapter
769 * Returns the size of the chip's BAR0 register space.
771 unsigned int t4_get_regs_len(struct adapter *adapter)
773 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
775 switch (chip_version) {
777 return T4_REGMAP_SIZE;
781 return T5_REGMAP_SIZE;
784 dev_err(adapter->pdev_dev,
785 "Unsupported chip version %d\n", chip_version);
/**
 *	t4_get_regs - read chip registers into provided buffer
 *	@adap: the adapter
 *	@buf: register buffer
 *	@buf_size: size (in bytes) of register buffer
 *
 *	If the provided register buffer isn't large enough for the chip's
 *	full register range, the register dump will be truncated to the
 *	register buffer's size.
 */
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
{
/* NOTE(review): the three per-chip register-range tables below are elided in
 * this copy of the file — each is a flat list of {first_reg, last_reg} pairs
 * spanning hundreds of lines in the full source.  Restore from the upstream
 * driver before building.
 */
static const unsigned int t4_reg_ranges[] = {
static const unsigned int t5_reg_ranges[] = {
static const unsigned int t6_reg_ranges[] = {
u32 *buf_end = (u32 *)((char *)buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
/* Select the right set of register ranges to dump depending on the
 * adapter chip type.
 */
switch (chip_version) {
reg_ranges = t4_reg_ranges;
reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
reg_ranges = t5_reg_ranges;
reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
reg_ranges = t6_reg_ranges;
reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
dev_err(adap->pdev_dev,
"Unsupported chip version %d\n", chip_version);
/* Clear the register buffer and insert the appropriate register
 * values selected by the above register ranges.
 */
memset(buf, 0, buf_size);
for (range = 0; range < reg_ranges_size; range += 2) {
unsigned int reg = reg_ranges[range];
unsigned int last_reg = reg_ranges[range + 1];
u32 *bufp = (u32 *)((char *)buf + reg);
/* Iterate across the register range filling in the register
 * buffer but don't write past the end of the register buffer.
 */
while (reg <= last_reg && bufp < buf_end) {
*bufp++ = t4_read_reg(adap, reg);
/* Serial EEPROM / VPD layout constants */
#define EEPROM_STAT_ADDR 0x7bfc	/* EEPROM status register address */
#define VPD_SIZE 0x800		/* real size of the adapter VPD area */
#define VPD_BASE 0x400		/* normal start of card VPD information */
#define VPD_BASE_OLD 0		/* early cards kept VPD at offset 0 */
#define VPD_LEN 1024		/* number of VPD bytes we read/parse */
#define CHELSIO_VPD_UNIQUE_ID 0x82	/* first byte of a Chelsio VPD (PCI SIG id) */
2645 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2646 * @phys_addr: the physical EEPROM address
2647 * @fn: the PCI function number
2648 * @sz: size of function-specific area
2650 * Translate a physical EEPROM address to virtual. The first 1K is
2651 * accessed through virtual addresses starting at 31K, the rest is
2652 * accessed through virtual addresses starting at 0.
2654 * The mapping is as follows:
2655 * [0..1K) -> [31K..32K)
2656 * [1K..1K+A) -> [31K-A..31K)
2657 * [1K+A..ES) -> [0..ES-A-1K)
2659 * where A = @fn * @sz, and ES = EEPROM size.
2661 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2664 if (phys_addr < 1024)
2665 return phys_addr + (31 << 10);
2666 if (phys_addr < 1024 + fn)
2667 return 31744 - fn + phys_addr - 1024;
2668 if (phys_addr < EEPROMSIZE)
2669 return phys_addr - 1024 - fn;
2674 * t4_seeprom_wp - enable/disable EEPROM write protection
2675 * @adapter: the adapter
2676 * @enable: whether to enable or disable write protection
2678 * Enables or disables write protection on the serial EEPROM.
2680 int t4_seeprom_wp(struct adapter *adapter, bool enable)
2682 unsigned int v = enable ? 0xc : 0;
2683 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2684 return ret < 0 ? ret : 0;
2688 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2689 * @adapter: adapter to read
2690 * @p: where to store the parameters
2692 * Reads card parameters stored in VPD EEPROM.
2694 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2696 int i, ret = 0, addr;
2699 unsigned int vpdr_len, kw_offset, id_len;
2701 vpd = vmalloc(VPD_LEN);
2705 /* We have two VPD data structures stored in the adapter VPD area.
2706 * By default, Linux calculates the size of the VPD area by traversing
2707 * the first VPD area at offset 0x0, so we need to tell the OS what
2708 * our real VPD size is.
2710 ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2714 /* Card information normally starts at VPD_BASE but early cards had
2717 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2721 /* The VPD shall have a unique identifier specified by the PCI SIG.
2722 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2723 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2724 * is expected to automatically put this entry at the
2725 * beginning of the VPD.
2727 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2729 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2733 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2734 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
2739 id_len = pci_vpd_lrdt_size(vpd);
2740 if (id_len > ID_LEN)
2743 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2745 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2750 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2751 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2752 if (vpdr_len + kw_offset > VPD_LEN) {
2753 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
2758 #define FIND_VPD_KW(var, name) do { \
2759 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2761 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2765 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2768 FIND_VPD_KW(i, "RV");
2769 for (csum = 0; i >= 0; i--)
2773 dev_err(adapter->pdev_dev,
2774 "corrupted VPD EEPROM, actual csum %u\n", csum);
2779 FIND_VPD_KW(ec, "EC");
2780 FIND_VPD_KW(sn, "SN");
2781 FIND_VPD_KW(pn, "PN");
2782 FIND_VPD_KW(na, "NA");
2785 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2787 memcpy(p->ec, vpd + ec, EC_LEN);
2789 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2790 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2792 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2793 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2795 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2796 strim((char *)p->na);
2800 return ret < 0 ? ret : 0;
2804 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2805 * @adapter: adapter to read
2806 * @p: where to store the parameters
2808 * Reads card parameters stored in VPD EEPROM and retrieves the Core
2809 * Clock. This can only be called after a connection to the firmware
2812 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2814 u32 cclk_param, cclk_val;
2817 /* Grab the raw VPD parameters.
2819 ret = t4_get_raw_vpd_params(adapter, p);
2823 /* Ask firmware for the Core Clock since it knows how to translate the
2824 * Reference Clock ('V2') VPD field into a Core Clock value ...
2826 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2827 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2828 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2829 1, &cclk_param, &cclk_val);
2838 /* serial flash and firmware constants */
2840 SF_ATTEMPTS = 10, /* max retries for SF operations */
2842 /* flash command opcodes */
2843 SF_PROG_PAGE = 2, /* program page */
2844 SF_WR_DISABLE = 4, /* disable writes */
2845 SF_RD_STATUS = 5, /* read status register */
2846 SF_WR_ENABLE = 6, /* enable writes */
2847 SF_RD_DATA_FAST = 0xb, /* read flash */
2848 SF_RD_ID = 0x9f, /* read ID */
2849 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2851 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
2855 * sf1_read - read data from the serial flash
2856 * @adapter: the adapter
2857 * @byte_cnt: number of bytes to read
2858 * @cont: whether another operation will be chained
2859 * @lock: whether to lock SF for PL access only
2860 * @valp: where to store the read data
2862 * Reads up to 4 bytes of data from the serial flash. The location of
2863 * the read needs to be specified prior to calling this by issuing the
2864 * appropriate commands to the serial flash.
2866 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2867 int lock, u32 *valp)
2871 if (!byte_cnt || byte_cnt > 4)
2873 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2875 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2876 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2877 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2879 *valp = t4_read_reg(adapter, SF_DATA_A);
2884 * sf1_write - write data to the serial flash
2885 * @adapter: the adapter
2886 * @byte_cnt: number of bytes to write
2887 * @cont: whether another operation will be chained
2888 * @lock: whether to lock SF for PL access only
2889 * @val: value to write
2891 * Writes up to 4 bytes of data to the serial flash. The location of
2892 * the write needs to be specified prior to calling this by issuing the
2893 * appropriate commands to the serial flash.
2895 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2898 if (!byte_cnt || byte_cnt > 4)
2900 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2902 t4_write_reg(adapter, SF_DATA_A, val);
2903 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2904 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2905 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2909 * flash_wait_op - wait for a flash operation to complete
2910 * @adapter: the adapter
2911 * @attempts: max number of polls of the status register
2912 * @delay: delay between polls in ms
2914 * Wait for a flash operation to complete by polling the status register.
/* flash_wait_op: poll the flash status register (SF_RD_STATUS chained through
 * sf1_write/sf1_read) until the device reports ready or @attempts runs out.
 * NOTE(review): loop body tail (ready test, delay, returns) is missing from
 * this extract — numbering jumps 2923 -> 2927; fragment not compilable as-is.
 */
2916 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
2922 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
2923 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
2927 if (--attempts == 0)
2935 * t4_read_flash - read words from serial flash
2936 * @adapter: the adapter
2937 * @addr: the start address for the read
2938 * @nwords: how many 32-bit words to read
2939 * @data: where to store the read data
2940 * @byte_oriented: whether to store data as bytes or as words
2942 * Read the specified number of 32-bit words from the serial flash.
2943 * If @byte_oriented is set the read data is stored as a byte array
2944 * (i.e., big-endian), otherwise as 32-bit words in the platform's
2945 * natural endianness.
/* t4_read_flash: read @nwords 32-bit words from serial flash starting at
 * @addr (must be 4-byte aligned and within sf_size). Uses the FAST READ
 * command; if @byte_oriented the result is left in big-endian byte order.
 * NOTE(review): numbering gaps show dropped lines (error returns, braces);
 * fragment not compilable as-is.
 */
2947 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2948 unsigned int nwords, u32 *data, int byte_oriented)
2952 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* Flash wants the 24-bit address big-endian, combined with the FAST READ op. */
2955 addr = swab32(addr) | SF_RD_DATA_FAST;
2957 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
2958 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
/* Chain reads: cont=1 until the last word, lock released on the final one. */
2961 for ( ; nwords; nwords--, data++) {
2962 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2964 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
2968 *data = (__force __u32)(cpu_to_be32(*data));
2974 * t4_write_flash - write up to a page of data to the serial flash
2975 * @adapter: the adapter
2976 * @addr: the start address to write
2977 * @n: length of data to write in bytes
2978 * @data: the data to write
2980 * Writes up to a page of data (256 bytes) to the serial flash starting
2981 * at the given address. All the data must be written to the same page.
/* t4_write_flash: program up to one 256-byte page of serial flash at @addr
 * (all @n bytes must stay within a single page), then read the page back and
 * memcmp-verify the write.
 * NOTE(review): numbering gaps show dropped lines (c computation, buf
 * declaration, error paths); fragment not compilable as-is.
 */
2983 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
2984 unsigned int n, const u8 *data)
2988 unsigned int i, c, left, val, offset = addr & 0xff;
2990 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
/* 24-bit address, big-endian, combined with PAGE PROGRAM opcode. */
2993 val = swab32(addr) | SF_PROG_PAGE;
2995 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2996 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* Feed the data 4 bytes at a time, big-endian within each word. */
2999 for (left = n; left; left -= c) {
3001 for (val = 0, i = 0; i < c; ++i)
3002 val = (val << 8) + *data++;
3004 ret = sf1_write(adapter, c, c != left, 1, val);
3008 ret = flash_wait_op(adapter, 8, 1);
3012 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3014 /* Read the page to verify the write succeeded */
3015 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
/* data was advanced past the written bytes above, hence data - n here. */
3019 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3020 dev_err(adapter->pdev_dev,
3021 "failed to correctly write the flash page at %#x\n",
3028 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3033 * t4_get_fw_version - read the firmware version
3034 * @adapter: the adapter
3035 * @vers: where to place the version
3037 * Reads the FW version from flash.
/* t4_get_fw_version: fetch the fw_ver word from the FW header in flash.
 * NOTE(review): trailing argument lines of the call were dropped from this
 * extract; fragment not compilable as-is.
 */
3039 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3041 return t4_read_flash(adapter, FLASH_FW_START +
3042 offsetof(struct fw_hdr, fw_ver), 1,
3047 * t4_get_bs_version - read the firmware bootstrap version
3048 * @adapter: the adapter
3049 * @vers: where to place the version
3051 * Reads the FW Bootstrap version from flash.
/* t4_get_bs_version: fetch the bootstrap FW's fw_ver word from its flash
 * region (FLASH_FWBOOTSTRAP_START).
 * NOTE(review): trailing argument lines dropped from this extract; fragment
 * not compilable as-is.
 */
3053 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3055 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3056 offsetof(struct fw_hdr, fw_ver), 1,
3061 * t4_get_tp_version - read the TP microcode version
3062 * @adapter: the adapter
3063 * @vers: where to place the version
3065 * Reads the TP microcode version from flash.
/* t4_get_tp_version: fetch the TP microcode version word from the FW header
 * in flash.
 * NOTE(review): trailing argument lines dropped from this extract; fragment
 * not compilable as-is.
 */
3067 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3069 return t4_read_flash(adapter, FLASH_FW_START +
3070 offsetof(struct fw_hdr, tp_microcode_ver),
3075 * t4_get_exprom_version - return the Expansion ROM version (if any)
3076 * @adapter: the adapter
3077 * @vers: where to place the version
3079 * Reads the Expansion ROM header from FLASH and returns the version
3080 * number (if present) through the @vers return value pointer. We return
3081 * this in the Firmware Version Format since it's convenient. Return
3082 * 0 on success, -ENOENT if no Expansion ROM is present.
/* t4_get_exprom_version: read the option-ROM header from flash, validate the
 * 0x55AA signature, and pack its 4 version bytes into FW-version format.
 * NOTE(review): numbering gaps show dropped lines (struct close, error
 * returns); fragment not compilable as-is.
 */
3084 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3086 struct exprom_header {
3087 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3088 unsigned char hdr_ver[4]; /* Expansion ROM version */
/* Word-sized bounce buffer so t4_read_flash can fill it as u32s. */
3090 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3094 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3095 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3100 hdr = (struct exprom_header *)exprom_header_buf;
/* PCI expansion-ROM signature check (0x55 0xAA). */
3101 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3104 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3105 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3106 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3107 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3112 * t4_get_vpd_version - return the VPD version
3113 * @adapter: the adapter
3114 * @vers: where to place the version
3116 * Reads the VPD via the Firmware interface (thus this can only be called
3117 * once we're ready to issue Firmware commands). The format of the
3118 * VPD version is adapter specific. Returns 0 on success, an error on
3121 * Note that early versions of the Firmware didn't include the ability
3122 * to retrieve the VPD version, so we zero-out the return-value parameter
3123 * in that case to avoid leaving it with garbage in it.
3125 * Also note that the Firmware will return its cached copy of the VPD
3126 * Revision ID, not the actual Revision ID as written in the Serial
3127 * EEPROM. This is only an issue if a new VPD has been written and the
3128 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3129 * to defer calling this routine till after a FW_RESET_CMD has been issued
3130 * if the Host Driver will be performing a full adapter initialization.
/* t4_get_vpd_version: query the firmware (DEV_VPDREV param) for its cached
 * VPD revision; requires the FW mailbox to be usable.
 * NOTE(review): numbering gaps show dropped lines (locals, zero-out fallback,
 * return); fragment not compilable as-is.
 */
3132 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3137 vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3138 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3139 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3140 1, &vpdrev_param, vers);
3147 * t4_get_scfg_version - return the Serial Configuration version
3148 * @adapter: the adapter
3149 * @vers: where to place the version
3151 * Reads the Serial Configuration Version via the Firmware interface
3152 * (thus this can only be called once we're ready to issue Firmware
3153 * commands). The format of the Serial Configuration version is
3154 * adapter specific. Returns 0 on success, an error on failure.
3156 * Note that early versions of the Firmware didn't include the ability
3157 * to retrieve the Serial Configuration version, so we zero-out the
3158 * return-value parameter in that case to avoid leaving it with
3161 * Also note that the Firmware will return its cached copy of the Serial
3162 * Initialization Revision ID, not the actual Revision ID as written in
3163 * the Serial EEPROM. This is only an issue if a new VPD has been written
3164 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3165 * it's best to defer calling this routine till after a FW_RESET_CMD has
3166 * been issued if the Host Driver will be performing a full adapter
/* t4_get_scfg_version: query the firmware (DEV_SCFGREV param) for the Serial
 * Configuration revision; requires the FW mailbox to be usable.
 * NOTE(review): numbering gaps show dropped lines (locals, zero-out fallback,
 * return); fragment not compilable as-is.
 */
3169 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3174 scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3175 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3176 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3177 1, &scfgrev_param, vers);
3184 * t4_get_version_info - extract various chip/firmware version information
3185 * @adapter: the adapter
3187 * Reads various chip/firmware version numbers and stores them into the
3188 * adapter Adapter Parameters structure. If any of the efforts fails
3189 * the first failure will be returned, but all of the version numbers
/* t4_get_version_info: gather every chip/FW version into adapter->params.
 * The FIRST_RET macro records only the FIRST failure while still attempting
 * all of the reads.
 * NOTE(review): numbering gaps show dropped lines (macro close, #undef,
 * return); fragment not compilable as-is.
 */
3192 int t4_get_version_info(struct adapter *adapter)
3196 #define FIRST_RET(__getvinfo) \
3198 int __ret = __getvinfo; \
3199 if (__ret && !ret) \
3203 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3204 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3205 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3206 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3207 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3208 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3215 * t4_dump_version_info - dump all of the adapter configuration IDs
3216 * @adapter: the adapter
3218 * Dumps all of the various bits of adapter configuration version/revision
3219 * IDs information. This is typically called at some point after
3220 * t4_get_version_info() has been called.
/* t4_dump_version_info: log all the version/revision IDs collected by
 * t4_get_version_info(). Missing firmware/TP are dev_warn (problems);
 * missing bootstrap/expansion-ROM are dev_info (legitimately optional).
 * NOTE(review): numbering gaps show dropped else-lines and braces; fragment
 * not compilable as-is.
 */
3222 void t4_dump_version_info(struct adapter *adapter)
3224 /* Device information */
3225 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
3226 adapter->params.vpd.id,
3227 CHELSIO_CHIP_RELEASE(adapter->params.chip));
3228 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
3229 adapter->params.vpd.sn, adapter->params.vpd.pn);
3231 /* Firmware Version */
3232 if (!adapter->params.fw_vers)
3233 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
3235 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
3236 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
3237 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
3238 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
3239 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
3241 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
3242 * Firmware, so dev_info() is more appropriate here.)
3244 if (!adapter->params.bs_vers)
3245 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
3247 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
3248 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
3249 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
3250 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
3251 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
3253 /* TP Microcode Version */
3254 if (!adapter->params.tp_vers)
3255 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
3257 dev_info(adapter->pdev_dev,
3258 "TP Microcode version: %u.%u.%u.%u\n",
3259 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
3260 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
3261 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
3262 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
3264 /* Expansion ROM version */
3265 if (!adapter->params.er_vers)
3266 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
3268 dev_info(adapter->pdev_dev,
3269 "Expansion ROM version: %u.%u.%u.%u\n",
3270 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
3271 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
3272 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
3273 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
3275 /* Serial Configuration version */
3276 dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
3277 adapter->params.scfg_vers);
3280 dev_info(adapter->pdev_dev, "VPD version: %#x\n",
3281 adapter->params.vpd_vers);
3285 * t4_check_fw_version - check if the FW is supported with this driver
3286 * @adap: the adapter
3288 * Checks if an adapter's FW is compatible with the driver. Returns 0
3289 * if there's exact match, a negative error if the version could not be
3290 * read or there's a major version mismatch
/* t4_check_fw_version: read the card's FW version (retrying up to 3 extra
 * times on -EBUSY/-EAGAIN) and reject it if below the per-chip
 * TxFW_MIN_VERSION_* floor for T4/T5/T6.
 * NOTE(review): numbering gaps show dropped case labels, breaks and returns;
 * fragment not compilable as-is.
 */
3292 int t4_check_fw_version(struct adapter *adap)
3294 int i, ret, major, minor, micro;
3295 int exp_major, exp_minor, exp_micro;
3296 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3298 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3299 /* Try multiple times before returning error */
3300 for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3301 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3306 major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3307 minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3308 micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
/* Pick the minimum supported FW version for this chip generation. */
3310 switch (chip_version) {
3312 exp_major = T4FW_MIN_VERSION_MAJOR;
3313 exp_minor = T4FW_MIN_VERSION_MINOR;
3314 exp_micro = T4FW_MIN_VERSION_MICRO;
3317 exp_major = T5FW_MIN_VERSION_MAJOR;
3318 exp_minor = T5FW_MIN_VERSION_MINOR;
3319 exp_micro = T5FW_MIN_VERSION_MICRO;
3322 exp_major = T6FW_MIN_VERSION_MAJOR;
3323 exp_minor = T6FW_MIN_VERSION_MINOR;
3324 exp_micro = T6FW_MIN_VERSION_MICRO;
3327 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
/* Lexicographic (major, minor, micro) comparison against the floor. */
3332 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3333 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3334 dev_err(adap->pdev_dev,
3335 "Card has firmware version %u.%u.%u, minimum "
3336 "supported firmware is %u.%u.%u.\n", major, minor,
3337 micro, exp_major, exp_minor, exp_micro);
3343 /* Is the given firmware API compatible with the one the driver was compiled
/* fw_compatible: two FW headers are compatible when chip and exact fw_ver
 * match, or when chip matches and every per-ULD interface version
 * (nic/vnic/ri/iscsi/fcoe) agrees.
 * NOTE(review): numbering gaps show dropped returns/#undef; fragment not
 * compilable as-is.
 */
3346 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3349 /* short circuit if it's the exact same firmware version */
3350 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3353 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3354 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3355 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3362 /* The firmware in the filesystem is usable, but should it be installed?
3363 * This routine explains itself in detail if it indicates the filesystem
3364 * firmware should be installed.
/* should_install_fs_fw: decide whether the firmware image from the filesystem
 * should replace the one on the card, logging the reason (card FW unusable,
 * or older than the driver-supported version) when it should.
 * NOTE(review): numbering gaps show dropped parameters, branches and returns;
 * fragment not compilable as-is.
 */
3366 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3371 if (!card_fw_usable) {
3372 reason = "incompatible or unusable";
3377 reason = "older than the version supported with this driver";
/* c = card FW version, k = filesystem ("kernel-provided") FW version —
 * presumably set by dropped lines above; verify against canonical source. */
3384 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3385 "installing firmware %u.%u.%u.%u on card.\n",
3386 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3387 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3388 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3389 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
/* t4_prep_fw: reconcile the three firmware versions in play — driver-compiled
 * (drv_fw), on-card (card_fw, read from flash) and filesystem image (fs_fw) —
 * upgrading the card from fs_fw when appropriate, and caching the chosen
 * versions in adap->params.
 * NOTE(review): numbering gaps show dropped lines (u32 d/c/k decls, error
 * paths, braces); fragment not compilable as-is.
 */
3394 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3395 const u8 *fw_data, unsigned int fw_size,
3396 struct fw_hdr *card_fw, enum dev_state state,
3399 int ret, card_fw_usable, fs_fw_usable;
3400 const struct fw_hdr *fs_fw;
3401 const struct fw_hdr *drv_fw;
3403 drv_fw = &fw_info->fw_hdr;
3405 /* Read the header of the firmware on the card */
/* Negated so ret carries a positive errno here — note the leading '-'. */
3406 ret = -t4_read_flash(adap, FLASH_FW_START,
3407 sizeof(*card_fw) / sizeof(uint32_t),
3408 (uint32_t *)card_fw, 1);
3410 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
3412 dev_err(adap->pdev_dev,
3413 "Unable to read card's firmware header: %d\n", ret);
3417 if (fw_data != NULL) {
3418 fs_fw = (const void *)fw_data;
3419 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
3425 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3426 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
3427 /* Common case: the firmware on the card is an exact match and
3428 * the filesystem one is an exact match too, or the filesystem
3429 * one is absent/incompatible.
3431 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
3432 should_install_fs_fw(adap, card_fw_usable,
3433 be32_to_cpu(fs_fw->fw_ver),
3434 be32_to_cpu(card_fw->fw_ver))) {
3435 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
3438 dev_err(adap->pdev_dev,
3439 "failed to install firmware: %d\n", ret);
3443 /* Installed successfully, update the cached header too. */
3446 *reset = 0; /* already reset as part of load_fw */
3449 if (!card_fw_usable) {
3452 d = be32_to_cpu(drv_fw->fw_ver);
3453 c = be32_to_cpu(card_fw->fw_ver);
3454 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
3456 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
3458 "driver compiled with %d.%d.%d.%d, "
3459 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
3461 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
3462 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
3463 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3464 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3465 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3466 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3471 /* We're using whatever's on the card and it's known to be good. */
3472 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
3473 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
3480 * t4_flash_erase_sectors - erase a range of flash sectors
3481 * @adapter: the adapter
3482 * @start: the first sector to erase
3483 * @end: the last sector to erase
3485 * Erases the sectors in the given inclusive range.
/* t4_flash_erase_sectors: erase flash sectors [start, end] inclusive —
 * WRITE ENABLE + SECTOR ERASE (sector number in bits 8+) per sector, waiting
 * up to 14 * 500ms per erase.
 * NOTE(review): numbering gaps show dropped lines (start++ advance, returns);
 * fragment not compilable as-is.
 */
3487 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3491 if (end >= adapter->params.sf_nsec)
3494 while (start <= end) {
3495 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3496 (ret = sf1_write(adapter, 4, 0, 1,
3497 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3498 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3499 dev_err(adapter->pdev_dev,
3500 "erase of flash sector %d failed, error %d\n",
3506 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3511 * t4_flash_cfg_addr - return the address of the flash configuration file
3512 * @adapter: the adapter
3514 * Return the address within the flash where the Firmware Configuration
/* t4_flash_cfg_addr: flash offset of the Firmware Configuration File;
 * 1MB (FPGA-sized) parts use a different layout than production parts.
 */
3517 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3519 if (adapter->params.sf_size == 0x100000)
3520 return FLASH_FPGA_CFG_START;
3522 return FLASH_CFG_START;
3525 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
3526 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3527 * and emit an error message for mismatched firmware to save our caller the
/* t4_fw_matches_chip: true iff the FW image's chip field matches the running
 * adapter's generation (T4/T5/T6); logs an error on mismatch so callers
 * don't have to.
 * NOTE(review): numbering gaps show dropped returns/braces; fragment not
 * compilable as-is.
 */
3530 static bool t4_fw_matches_chip(const struct adapter *adap,
3531 const struct fw_hdr *hdr)
3533 /* The expression below will return FALSE for any unsupported adapter
3534 * which will keep us "honest" in the future ...
3536 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3537 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3538 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
3541 dev_err(adap->pdev_dev,
3542 "FW image (%d) is not suitable for this adapter (%d)\n",
3543 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3548 * t4_load_fw - download firmware
3549 * @adap: the adapter
3550 * @fw_data: the firmware image to write
3553 * Write the supplied firmware image to the card's serial flash.
/* t4_load_fw: validate a FW image (size, header length, chip match, 32-bit
 * ones'-complement checksum), erase the spanned sectors, and program it —
 * writing the real fw_ver last so a partial write is detectable.
 * NOTE(review): numbering gaps show dropped lines (ret/addr/csum decls,
 * size checks, error paths, braces); fragment not compilable as-is.
 */
3555 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3560 u8 first_page[SF_PAGE_SIZE];
3561 const __be32 *p = (const __be32 *)fw_data;
3562 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3563 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3564 unsigned int fw_img_start = adap->params.sf_fw_start;
3565 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
3568 dev_err(adap->pdev_dev, "FW image has no data\n");
3572 dev_err(adap->pdev_dev,
3573 "FW image size not multiple of 512 bytes\n");
3576 if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
3577 dev_err(adap->pdev_dev,
3578 "FW image size differs from size in FW header\n");
3581 if (size > FW_MAX_SIZE) {
3582 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3586 if (!t4_fw_matches_chip(adap, hdr))
/* Sum of all 32-bit words of a valid image must equal 0xffffffff. */
3589 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3590 csum += be32_to_cpu(p[i]);
3592 if (csum != 0xffffffff) {
3593 dev_err(adap->pdev_dev,
3594 "corrupted firmware image, checksum %#x\n", csum);
3598 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3599 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3604 * We write the correct version at the end so the driver can see a bad
3605 * version if the FW write fails. Start by writing a copy of the
3606 * first page with a bad version.
3608 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3609 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3610 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
3614 addr = fw_img_start;
3615 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3616 addr += SF_PAGE_SIZE;
3617 fw_data += SF_PAGE_SIZE;
3618 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
/* Finally patch in the genuine fw_ver to mark the image complete. */
3623 ret = t4_write_flash(adap,
3624 fw_img_start + offsetof(struct fw_hdr, fw_ver),
3625 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3628 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
3631 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3636 * t4_phy_fw_ver - return current PHY firmware version
3637 * @adap: the adapter
3638 * @phy_fw_ver: return value buffer for PHY firmware version
3640 * Returns the current version of external PHY firmware on the
/* t4_phy_fw_ver: query the FW (DEV_PHYFW / PHYFW_VERSION sub-param) for the
 * currently-loaded external PHY firmware version.
 * NOTE(review): trailing argument lines and return dropped from this
 * extract; fragment not compilable as-is.
 */
3643 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3648 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3649 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3650 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3651 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3652 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3661 * t4_load_phy_fw - download port PHY firmware
3662 * @adap: the adapter
3663 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
3664 * @win_lock: the lock to use to guard the memory copy
3665 * @phy_fw_version: function to check PHY firmware versions
3666 * @phy_fw_data: the PHY firmware image to write
3667 * @phy_fw_size: image size
3669 * Transfer the specified PHY firmware to the adapter. If a non-NULL
3670 * @phy_fw_version is supplied, then it will be used to determine if
3671 * it's necessary to perform the transfer by comparing the version
3672 * of any existing adapter PHY firmware with that of the passed in
3673 * PHY firmware image. If @win_lock is non-NULL then it will be used
3674 * around the call to t4_memory_rw() which transfers the PHY firmware
3677 * A negative error number will be returned if an error occurs. If
3678 * version number support is available and there's no need to upgrade
3679 * the firmware, 0 will be returned. If firmware is successfully
3680 * transferred to the adapter, 1 will be returned.
3682 * NOTE: some adapters only have local RAM to store the PHY firmware. As
3683 * a result, a RESET of the adapter would cause that RAM to lose its
3684 * contents. Thus, loading PHY firmware on such adapters must happen
3685 * after any FW_RESET_CMDs ...
/* t4_load_phy_fw: transfer an external-PHY firmware image to the adapter.
 * Optionally short-circuits when @phy_fw_version reports the image is not
 * newer; otherwise asks the chip FW for a destination (DEV_PHYFW download
 * param), copies the image via a memory window (under @win_lock if given),
 * then tells the FW to flash the PHYs and re-checks the version.
 * FIX(review): the two "&param," argument lines had been corrupted to the
 * mojibake "¶m" ("&para" -> '¶' HTML-entity damage); restored here.
 * NOTE(review): numbering gaps show other dropped lines (u32 param/val
 * decls, error returns, braces); fragment still not compilable as-is.
 */
3687 int t4_load_phy_fw(struct adapter *adap,
3688 int win, spinlock_t *win_lock,
3689 int (*phy_fw_version)(const u8 *, size_t),
3690 const u8 *phy_fw_data, size_t phy_fw_size)
3692 unsigned long mtype = 0, maddr = 0;
3694 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3697 /* If we have version number support, then check to see if the adapter
3698 * already has up-to-date PHY firmware loaded.
3700 if (phy_fw_version) {
3701 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3702 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3706 if (cur_phy_fw_ver >= new_phy_fw_vers) {
3707 CH_WARN(adap, "PHY Firmware already up-to-date, "
3708 "version %#x\n", cur_phy_fw_ver);
3713 /* Ask the firmware where it wants us to copy the PHY firmware image.
3714 * The size of the file requires a special version of the READ command
3715 * which will pass the file size via the values field in PARAMS_CMD and
3716 * retrieve the return value from firmware and place it in the same
3719 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3720 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3721 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3722 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3724 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
3725 &param, &val, 1, true);
3729 maddr = (val & 0xff) << 16;
3731 /* Copy the supplied PHY Firmware image to the adapter memory location
3732 * allocated by the adapter firmware.
3735 spin_lock_bh(win_lock);
3736 ret = t4_memory_rw(adap, win, mtype, maddr,
3737 phy_fw_size, (__be32 *)phy_fw_data,
3740 spin_unlock_bh(win_lock);
3744 /* Tell the firmware that the PHY firmware image has been written to
3745 * RAM and it can now start copying it over to the PHYs. The chip
3746 * firmware will RESET the affected PHYs as part of this operation
3747 * leaving them running the new PHY firmware image.
3749 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3750 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3751 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3752 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3753 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
3754 &param, &val, 30000);
3756 /* If we have version number support, then check to see that the new
3757 * firmware got loaded properly.
3759 if (phy_fw_version) {
3760 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3764 if (cur_phy_fw_ver != new_phy_fw_vers) {
3765 CH_WARN(adap, "PHY Firmware did not update: "
3766 "version on adapter %#x, "
3767 "version flashed %#x\n",
3768 cur_phy_fw_ver, new_phy_fw_vers);
3777 * t4_fwcache - firmware cache operation
3778 * @adap: the adapter
3779 * @op : the operation (flush or flush and invalidate)
/* t4_fwcache: issue a FW_PARAMS_CMD writing DEV_FWCACHE with @op to flush
 * (or flush-and-invalidate) the firmware cache.
 * NOTE(review): numbering gaps show dropped lvalue lines (op_to_vfn,
 * param[0].mnem assignments are split mid-statement); fragment not
 * compilable as-is.
 */
3781 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3783 struct fw_params_cmd c;
3785 memset(&c, 0, sizeof(c));
3787 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3788 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3789 FW_PARAMS_CMD_PFN_V(adap->pf) |
3790 FW_PARAMS_CMD_VFN_V(0));
3791 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3793 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3794 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
/* op is already a wire-format value; __force silences the sparse endian
 * warning rather than converting. */
3795 c.param[0].val = (__force __be32)op;
3797 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
/* t4_cim_read_pif_la: dump the CIM PIF request/response logic-analyzer
 * buffers, temporarily disabling LA capture (LADBGEN) and restoring the
 * original debug config afterwards.
 * NOTE(review): numbering gaps show dropped lines (i/j decls, NULL-pointer
 * guards, braces); fragment not compilable as-is.
 */
3800 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3801 unsigned int *pif_req_wrptr,
3802 unsigned int *pif_rsp_wrptr)
3805 u32 cfg, val, req, rsp;
3807 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
/* Turn capture off while reading so the LA contents stay stable. */
3808 if (cfg & LADBGEN_F)
3809 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3811 val = t4_read_reg(adap, CIM_DEBUGSTS_A);
3812 req = POLADBGWRPTR_G(val);
3813 rsp = PILADBGWRPTR_G(val);
3815 *pif_req_wrptr = req;
3817 *pif_rsp_wrptr = rsp;
3819 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3820 for (j = 0; j < 6; j++) {
3821 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
3822 PILADBGRDPTR_V(rsp));
3823 *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
3824 *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
3828 req = (req + 2) & POLADBGRDPTR_M;
3829 rsp = (rsp + 2) & PILADBGRDPTR_M;
/* Restore the caller's debug configuration (re-enables LA if it was on). */
3831 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
/* t4_cim_read_ma_la: dump the CIM MA logic-analyzer request/response buffers,
 * with the same disable-capture / restore-config dance as
 * t4_cim_read_pif_la().
 * NOTE(review): numbering gaps show dropped lines (i/j/cfg decls, idx
 * computation, braces); fragment not compilable as-is.
 */
3834 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3839 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3840 if (cfg & LADBGEN_F)
3841 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3843 for (i = 0; i < CIM_MALA_SIZE; i++) {
3844 for (j = 0; j < 5; j++) {
/* idx presumably derived from i/j by a dropped line -- TODO confirm. */
3846 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
3847 PILADBGRDPTR_V(idx));
3848 *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
3849 *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
3852 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
/* t4_ulprx_read_la: read the 8 ULP-RX logic-analyzer channels into @la_buf,
 * interleaved so channel i occupies la_buf[i], la_buf[i+8], ... (stride 8).
 * NOTE(review): numbering gaps show dropped lines (i/j decls, braces);
 * fragment not compilable as-is.
 */
3855 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3859 for (i = 0; i < 8; i++) {
3860 u32 *p = la_buf + i;
3862 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
/* Writing the current write pointer as the read pointer rewinds the LA. */
3863 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3864 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3865 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3866 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
3870 #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
3874 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3875 * @caps16: a 16-bit Port Capabilities value
3877 * Returns the equivalent 32-bit Port Capabilities value.
/* fwcaps16_to_caps32: widen a 16-bit Port Capabilities word to the 32-bit
 * format by translating each FW_PORT_CAP_* bit to its FW_PORT_CAP32_*
 * counterpart.
 * NOTE(review): numbering gaps show dropped macro-continuation/return lines;
 * fragment not compilable as-is.
 */
3879 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3881 fw_port_cap32_t caps32 = 0;
3883 #define CAP16_TO_CAP32(__cap) \
3885 if (caps16 & FW_PORT_CAP_##__cap) \
3886 caps32 |= FW_PORT_CAP32_##__cap; \
3889 CAP16_TO_CAP32(SPEED_100M);
3890 CAP16_TO_CAP32(SPEED_1G);
3891 CAP16_TO_CAP32(SPEED_25G);
3892 CAP16_TO_CAP32(SPEED_10G);
3893 CAP16_TO_CAP32(SPEED_40G);
3894 CAP16_TO_CAP32(SPEED_100G);
3895 CAP16_TO_CAP32(FC_RX);
3896 CAP16_TO_CAP32(FC_TX);
3897 CAP16_TO_CAP32(ANEG);
3898 CAP16_TO_CAP32(MDIX);
3899 CAP16_TO_CAP32(MDIAUTO);
3900 CAP16_TO_CAP32(FEC_RS);
3901 CAP16_TO_CAP32(FEC_BASER_RS);
3902 CAP16_TO_CAP32(802_3_PAUSE);
3903 CAP16_TO_CAP32(802_3_ASM_DIR);
3905 #undef CAP16_TO_CAP32
3911 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3912 * @caps32: a 32-bit Port Capabilities value
3914 * Returns the equivalent 16-bit Port Capabilities value. Note that
3915 * not all 32-bit Port Capabilities can be represented in the 16-bit
3916 * Port Capabilities and some fields/values may not make it.
/* fwcaps32_to_caps16: narrow a 32-bit Port Capabilities word to the legacy
 * 16-bit format; 32-bit-only capabilities are silently dropped.
 * NOTE(review): numbering gaps show dropped macro-continuation/return lines;
 * fragment not compilable as-is.
 */
3918 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
3920 fw_port_cap16_t caps16 = 0;
3922 #define CAP32_TO_CAP16(__cap) \
3924 if (caps32 & FW_PORT_CAP32_##__cap) \
3925 caps16 |= FW_PORT_CAP_##__cap; \
3928 CAP32_TO_CAP16(SPEED_100M);
3929 CAP32_TO_CAP16(SPEED_1G);
3930 CAP32_TO_CAP16(SPEED_10G);
3931 CAP32_TO_CAP16(SPEED_25G);
3932 CAP32_TO_CAP16(SPEED_40G);
3933 CAP32_TO_CAP16(SPEED_100G);
3934 CAP32_TO_CAP16(FC_RX);
3935 CAP32_TO_CAP16(FC_TX);
3936 CAP32_TO_CAP16(802_3_PAUSE);
3937 CAP32_TO_CAP16(802_3_ASM_DIR);
3938 CAP32_TO_CAP16(ANEG);
3939 CAP32_TO_CAP16(MDIX);
3940 CAP32_TO_CAP16(MDIAUTO);
3941 CAP32_TO_CAP16(FEC_RS);
3942 CAP32_TO_CAP16(FEC_BASER_RS);
3944 #undef CAP32_TO_CAP16
3949 /* Translate Firmware Port Capabilities Pause specification to Common Code */
/* fwcap_to_cc_pause: map FW FC_RX/FC_TX capability bits to the Common Code
 * PAUSE_RX/PAUSE_TX flags.
 * NOTE(review): return line dropped from this extract.
 */
3950 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
3952 enum cc_pause cc_pause = 0;
3954 if (fw_pause & FW_PORT_CAP32_FC_RX)
3955 cc_pause |= PAUSE_RX;
3956 if (fw_pause & FW_PORT_CAP32_FC_TX)
3957 cc_pause |= PAUSE_TX;
3962 /* Translate Common Code Pause specification into Firmware Port Capabilities */
/* cc_to_fwcap_pause: inverse of fwcap_to_cc_pause — Common Code PAUSE_RX/TX
 * flags to FW FC_RX/FC_TX capability bits.
 * NOTE(review): return line dropped from this extract.
 */
3963 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
3965 fw_port_cap32_t fw_pause = 0;
3967 if (cc_pause & PAUSE_RX)
3968 fw_pause |= FW_PORT_CAP32_FC_RX;
3969 if (cc_pause & PAUSE_TX)
3970 fw_pause |= FW_PORT_CAP32_FC_TX;
3975 /* Translate Firmware Forward Error Correction specification to Common Code */
/* fwcap_to_cc_fec: map FW FEC capability bits to Common Code FEC flags.
 * NOTE(review): the FEC_RS |= line and return were dropped from this
 * extract (numbering jumps 3980 -> 3982).
 */
3976 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
3978 enum cc_fec cc_fec = 0;
3980 if (fw_fec & FW_PORT_CAP32_FEC_RS)
3982 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
3983 cc_fec |= FEC_BASER_RS;
3988 /* Translate Common Code Forward Error Correction specification to Firmware */
/* cc_to_fwcap_fec: inverse of fwcap_to_cc_fec — Common Code FEC flags to FW
 * FEC capability bits.
 * NOTE(review): return line dropped from this extract.
 */
3989 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
3991 fw_port_cap32_t fw_fec = 0;
3993 if (cc_fec & FEC_RS)
3994 fw_fec |= FW_PORT_CAP32_FEC_RS;
3995 if (cc_fec & FEC_BASER_RS)
3996 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
4002 * t4_link_l1cfg - apply link configuration to MAC/PHY
4003 * @adapter: the adapter
4004 * @mbox: the Firmware Mailbox to use
4005 * @port: the Port ID
4006 * @lc: the Port's Link Configuration
4008 * Set up a port's MAC and PHY according to a desired link configuration.
4009 * - If the PHY can auto-negotiate first decide what to advertise, then
4010 * enable/disable auto-negotiation as desired, and reset.
4011 * - If the PHY does not auto-negotiate just reset it.
4012 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4013 * otherwise do it later based on the outcome of auto-negotiation.
/* t4_link_l1cfg: build the Requested Port Capabilities (pause + FEC + speed/
 * aneg) from the link_config and send an L1_CFG (16-bit caps) or L1_CFG32
 * FW_PORT_CMD depending on the firmware's supported caps format.
 * NOTE(review): numbering gaps show dropped lines (comment closers, braces,
 * FW_LEN16 term); fragment not compilable as-is.
 */
4015 int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
4016 unsigned int port, struct link_config *lc)
4018 unsigned int fw_caps = adapter->params.fw_caps_support;
4019 struct fw_port_cmd cmd;
4020 unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
4021 fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
4025 /* Convert driver coding of Pause Frame Flow Control settings into the
4028 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4030 /* Convert Common Code Forward Error Control settings into the
4031 * Firmware's API. If the current Requested FEC has "Automatic"
4032 * (IEEE 802.3) specified, then we use whatever the Firmware
4033 * sent us as part of its IEEE 802.3-based interpretation of
4034 * the Transceiver Module EPROM FEC parameters. Otherwise we
4035 * use whatever is in the current Requested FEC settings.
4037 if (lc->requested_fec & FEC_AUTO)
4038 cc_fec = fwcap_to_cc_fec(lc->def_acaps)
4040 cc_fec = lc->requested_fec;
4041 fw_fec = cc_to_fwcap_fec(cc_fec);
4043 /* Figure out what our Requested Port Capabilities are going to be.
/* No autoneg capability: advertise fixed caps; MDI bit omitted. */
4045 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4046 rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
4047 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4049 } else if (lc->autoneg == AUTONEG_DISABLE) {
4050 rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
4051 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4054 rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
4057 /* And send that on to the Firmware ...
4059 memset(&cmd, 0, sizeof(cmd));
4060 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4061 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4062 FW_PORT_CMD_PORTID_V(port));
4063 cmd.action_to_len16 =
4064 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4065 ? FW_PORT_ACTION_L1_CFG
4066 : FW_PORT_ACTION_L1_CFG32) |
4068 if (fw_caps == FW_CAPS16)
4069 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
4071 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
4072 return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4076 * t4_restart_aneg - restart autonegotiation
4077 * @adap: the adapter
4078 * @mbox: mbox to use for the FW command
4079 * @port: the port id
4081 * Restarts autonegotiation for the selected port.
4083 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4085 struct fw_port_cmd c;
4087 memset(&c, 0, sizeof(c));
4088 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4089 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4090 FW_PORT_CMD_PORTID_V(port));
4092 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
4094 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
4095 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
typedef void (*int_handler_t)(struct adapter *adap);

/* Describes one interrupt condition for the table-driven handler below. */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
4109 * t4_handle_intr_status - table driven interrupt handler
4110 * @adapter: the adapter that generated the interrupt
4111 * @reg: the interrupt status register to process
4112 * @acts: table of interrupt actions
4114 * A table driven interrupt handler that applies a set of masks to an
4115 * interrupt status word and performs the corresponding actions if the
4116 * interrupts described by the mask have occurred. The actions include
4117 * optionally emitting a warning or alert message. The table is terminated
4118 * by an entry specifying mask 0. Returns the number of fatal interrupt
4121 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4122 const struct intr_info *acts)
4125 unsigned int mask = 0;
4126 unsigned int status = t4_read_reg(adapter, reg);
4128 for ( ; acts->mask; ++acts) {
4129 if (!(status & acts->mask))
4133 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4134 status & acts->mask);
4135 } else if (acts->msg && printk_ratelimit())
4136 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4137 status & acts->mask);
4138 if (acts->int_handler)
4139 acts->int_handler(adapter);
4143 if (status) /* clear processed interrupts */
4144 t4_write_reg(adapter, reg, status);
4149 * Interrupt handler for the PCIE module.
4151 static void pcie_intr_handler(struct adapter *adapter)
4153 static const struct intr_info sysbus_intr_info[] = {
4154 { RNPP_F, "RXNP array parity error", -1, 1 },
4155 { RPCP_F, "RXPC array parity error", -1, 1 },
4156 { RCIP_F, "RXCIF array parity error", -1, 1 },
4157 { RCCP_F, "Rx completions control array parity error", -1, 1 },
4158 { RFTP_F, "RXFT array parity error", -1, 1 },
4161 static const struct intr_info pcie_port_intr_info[] = {
4162 { TPCP_F, "TXPC array parity error", -1, 1 },
4163 { TNPP_F, "TXNP array parity error", -1, 1 },
4164 { TFTP_F, "TXFT array parity error", -1, 1 },
4165 { TCAP_F, "TXCA array parity error", -1, 1 },
4166 { TCIP_F, "TXCIF array parity error", -1, 1 },
4167 { RCAP_F, "RXCA array parity error", -1, 1 },
4168 { OTDD_F, "outbound request TLP discarded", -1, 1 },
4169 { RDPE_F, "Rx data parity error", -1, 1 },
4170 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
4173 static const struct intr_info pcie_intr_info[] = {
4174 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4175 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4176 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4177 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4178 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4179 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4180 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4181 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4182 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4183 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4184 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4185 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4186 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4187 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4188 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4189 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4190 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4191 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4192 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4193 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4194 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4195 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4196 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4197 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4198 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4199 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4200 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4201 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
4202 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
4203 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
4208 static struct intr_info t5_pcie_intr_info[] = {
4209 { MSTGRPPERR_F, "Master Response Read Queue parity error",
4211 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4212 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4213 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4214 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4215 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4216 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4217 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
4219 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
4221 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4222 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4223 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4224 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4225 { DREQWRPERR_F, "PCI DMA channel write request parity error",
4227 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4228 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4229 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4230 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4231 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4232 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4233 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4234 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4235 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4236 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4237 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
4239 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4241 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4242 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4243 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4244 { READRSPERR_F, "Outbound read error", -1, 0 },
4250 if (is_t4(adapter->params.chip))
4251 fat = t4_handle_intr_status(adapter,
4252 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4254 t4_handle_intr_status(adapter,
4255 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4256 pcie_port_intr_info) +
4257 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4260 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4264 t4_fatal_err(adapter);
4268 * TP interrupt handler.
4270 static void tp_intr_handler(struct adapter *adapter)
4272 static const struct intr_info tp_intr_info[] = {
4273 { 0x3fffffff, "TP parity error", -1, 1 },
4274 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
4278 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
4279 t4_fatal_err(adapter);
4283 * SGE interrupt handler.
4285 static void sge_intr_handler(struct adapter *adapter)
4290 static const struct intr_info sge_intr_info[] = {
4291 { ERR_CPL_EXCEED_IQE_SIZE_F,
4292 "SGE received CPL exceeding IQE size", -1, 1 },
4293 { ERR_INVALID_CIDX_INC_F,
4294 "SGE GTS CIDX increment too large", -1, 0 },
4295 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4296 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
4297 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
4298 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4299 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
4301 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
4303 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
4305 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
4307 { ERR_ING_CTXT_PRIO_F,
4308 "SGE too many priority ingress contexts", -1, 0 },
4309 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4310 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
4314 static struct intr_info t4t5_sge_intr_info[] = {
4315 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4316 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4317 { ERR_EGR_CTXT_PRIO_F,
4318 "SGE too many priority egress contexts", -1, 0 },
4322 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
4323 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
4325 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
4326 (unsigned long long)v);
4327 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
4328 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
4331 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4332 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4333 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4334 t4t5_sge_intr_info);
4336 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4337 if (err & ERROR_QID_VALID_F) {
4338 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4340 if (err & UNCAPTURED_ERROR_F)
4341 dev_err(adapter->pdev_dev,
4342 "SGE UNCAPTURED_ERROR set (clearing)\n");
4343 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4344 UNCAPTURED_ERROR_F);
4348 t4_fatal_err(adapter);
4351 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
4352 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
4353 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
4354 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4357 * CIM interrupt handler.
4359 static void cim_intr_handler(struct adapter *adapter)
4361 static const struct intr_info cim_intr_info[] = {
4362 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
4363 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4364 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4365 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
4366 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
4367 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
4368 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
4369 { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
4372 static const struct intr_info cim_upintr_info[] = {
4373 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
4374 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
4375 { ILLWRINT_F, "CIM illegal write", -1, 1 },
4376 { ILLRDINT_F, "CIM illegal read", -1, 1 },
4377 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
4378 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
4379 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
4380 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
4381 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
4382 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
4383 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
4384 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
4385 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
4386 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
4387 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
4388 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
4389 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
4390 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
4391 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
4392 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
4393 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
4394 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
4395 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
4396 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
4397 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
4398 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
4399 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
4400 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
4407 fw_err = t4_read_reg(adapter, PCIE_FW_A);
4408 if (fw_err & PCIE_FW_ERR_F)
4409 t4_report_fw_error(adapter);
4411 /* When the Firmware detects an internal error which normally
4412 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
4413 * in order to make sure the Host sees the Firmware Crash. So
4414 * if we have a Timer0 interrupt and don't see a Firmware Crash,
4415 * ignore the Timer0 interrupt.
4418 val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
4419 if (val & TIMER0INT_F)
4420 if (!(fw_err & PCIE_FW_ERR_F) ||
4421 (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
4422 t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
4425 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
4427 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
4430 t4_fatal_err(adapter);
4434 * ULP RX interrupt handler.
4436 static void ulprx_intr_handler(struct adapter *adapter)
4438 static const struct intr_info ulprx_intr_info[] = {
4439 { 0x1800000, "ULPRX context error", -1, 1 },
4440 { 0x7fffff, "ULPRX parity error", -1, 1 },
4444 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4445 t4_fatal_err(adapter);
4449 * ULP TX interrupt handler.
4451 static void ulptx_intr_handler(struct adapter *adapter)
4453 static const struct intr_info ulptx_intr_info[] = {
4454 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4456 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4458 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4460 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4462 { 0xfffffff, "ULPTX parity error", -1, 1 },
4466 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4467 t4_fatal_err(adapter);
4471 * PM TX interrupt handler.
4473 static void pmtx_intr_handler(struct adapter *adapter)
4475 static const struct intr_info pmtx_intr_info[] = {
4476 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4477 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4478 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4479 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4480 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4481 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4482 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4484 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4485 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4489 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4490 t4_fatal_err(adapter);
4494 * PM RX interrupt handler.
4496 static void pmrx_intr_handler(struct adapter *adapter)
4498 static const struct intr_info pmrx_intr_info[] = {
4499 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4500 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4501 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4502 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4504 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4505 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4509 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4510 t4_fatal_err(adapter);
4514 * CPL switch interrupt handler.
4516 static void cplsw_intr_handler(struct adapter *adapter)
4518 static const struct intr_info cplsw_intr_info[] = {
4519 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4520 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4521 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4522 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4523 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4524 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4528 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4529 t4_fatal_err(adapter);
4533 * LE interrupt handler.
4535 static void le_intr_handler(struct adapter *adap)
4537 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4538 static const struct intr_info le_intr_info[] = {
4539 { LIPMISS_F, "LE LIP miss", -1, 0 },
4540 { LIP0_F, "LE 0 LIP error", -1, 0 },
4541 { PARITYERR_F, "LE parity error", -1, 1 },
4542 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4543 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
4547 static struct intr_info t6_le_intr_info[] = {
4548 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4549 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4550 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4551 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4552 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4556 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4557 (chip <= CHELSIO_T5) ?
4558 le_intr_info : t6_le_intr_info))
4563 * MPS interrupt handler.
4565 static void mps_intr_handler(struct adapter *adapter)
4567 static const struct intr_info mps_rx_intr_info[] = {
4568 { 0xffffff, "MPS Rx parity error", -1, 1 },
4571 static const struct intr_info mps_tx_intr_info[] = {
4572 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4573 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4574 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4576 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4578 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4579 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4580 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4583 static const struct intr_info t6_mps_tx_intr_info[] = {
4584 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4585 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4586 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4588 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4590 /* MPS Tx Bubble is normal for T6 */
4591 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4592 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4595 static const struct intr_info mps_trc_intr_info[] = {
4596 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4597 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4599 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4602 static const struct intr_info mps_stat_sram_intr_info[] = {
4603 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4606 static const struct intr_info mps_stat_tx_intr_info[] = {
4607 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4610 static const struct intr_info mps_stat_rx_intr_info[] = {
4611 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4614 static const struct intr_info mps_cls_intr_info[] = {
4615 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4616 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4617 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
4623 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
4625 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
4626 is_t6(adapter->params.chip)
4627 ? t6_mps_tx_intr_info
4628 : mps_tx_intr_info) +
4629 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
4630 mps_trc_intr_info) +
4631 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
4632 mps_stat_sram_intr_info) +
4633 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
4634 mps_stat_tx_intr_info) +
4635 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
4636 mps_stat_rx_intr_info) +
4637 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
4640 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4641 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
4643 t4_fatal_err(adapter);
4646 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4650 * EDC/MC interrupt handler.
4652 static void mem_intr_handler(struct adapter *adapter, int idx)
4654 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4656 unsigned int addr, cnt_addr, v;
4658 if (idx <= MEM_EDC1) {
4659 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4660 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4661 } else if (idx == MEM_MC) {
4662 if (is_t4(adapter->params.chip)) {
4663 addr = MC_INT_CAUSE_A;
4664 cnt_addr = MC_ECC_STATUS_A;
4666 addr = MC_P_INT_CAUSE_A;
4667 cnt_addr = MC_P_ECC_STATUS_A;
4670 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4671 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4674 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4675 if (v & PERR_INT_CAUSE_F)
4676 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4678 if (v & ECC_CE_INT_CAUSE_F) {
4679 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
4681 t4_edc_err_read(adapter, idx);
4683 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4684 if (printk_ratelimit())
4685 dev_warn(adapter->pdev_dev,
4686 "%u %s correctable ECC data error%s\n",
4687 cnt, name[idx], cnt > 1 ? "s" : "");
4689 if (v & ECC_UE_INT_CAUSE_F)
4690 dev_alert(adapter->pdev_dev,
4691 "%s uncorrectable ECC data error\n", name[idx]);
4693 t4_write_reg(adapter, addr, v);
4694 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4695 t4_fatal_err(adapter);
4699 * MA interrupt handler.
4701 static void ma_intr_handler(struct adapter *adap)
4703 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4705 if (status & MEM_PERR_INT_CAUSE_F) {
4706 dev_alert(adap->pdev_dev,
4707 "MA parity error, parity status %#x\n",
4708 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
4709 if (is_t5(adap->params.chip))
4710 dev_alert(adap->pdev_dev,
4711 "MA parity error, parity status %#x\n",
4713 MA_PARITY_ERROR_STATUS2_A));
4715 if (status & MEM_WRAP_INT_CAUSE_F) {
4716 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4717 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4718 "client %u to address %#x\n",
4719 MEM_WRAP_CLIENT_NUM_G(v),
4720 MEM_WRAP_ADDRESS_G(v) << 4);
4722 t4_write_reg(adap, MA_INT_CAUSE_A, status);
4727 * SMB interrupt handler.
4729 static void smb_intr_handler(struct adapter *adap)
4731 static const struct intr_info smb_intr_info[] = {
4732 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4733 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4734 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4738 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4743 * NC-SI interrupt handler.
4745 static void ncsi_intr_handler(struct adapter *adap)
4747 static const struct intr_info ncsi_intr_info[] = {
4748 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4749 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4750 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4751 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4755 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4760 * XGMAC interrupt handler.
4762 static void xgmac_intr_handler(struct adapter *adap, int port)
4764 u32 v, int_cause_reg;
4766 if (is_t4(adap->params.chip))
4767 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4769 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4771 v = t4_read_reg(adap, int_cause_reg);
4773 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4777 if (v & TXFIFO_PRTY_ERR_F)
4778 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4780 if (v & RXFIFO_PRTY_ERR_F)
4781 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4783 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4788 * PL interrupt handler.
4790 static void pl_intr_handler(struct adapter *adap)
4792 static const struct intr_info pl_intr_info[] = {
4793 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4794 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4798 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
4802 #define PF_INTR_MASK (PFSW_F)
4803 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
4804 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
4805 CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
4808 * t4_slow_intr_handler - control path interrupt handler
4809 * @adapter: the adapter
4811 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4812 * The designation 'slow' is because it involves register reads, while
4813 * data interrupts typically don't involve any MMIOs.
4815 int t4_slow_intr_handler(struct adapter *adapter)
4817 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
4819 if (!(cause & GLBL_INTR_MASK))
4822 cim_intr_handler(adapter);
4824 mps_intr_handler(adapter);
4826 ncsi_intr_handler(adapter);
4828 pl_intr_handler(adapter);
4830 smb_intr_handler(adapter);
4831 if (cause & XGMAC0_F)
4832 xgmac_intr_handler(adapter, 0);
4833 if (cause & XGMAC1_F)
4834 xgmac_intr_handler(adapter, 1);
4835 if (cause & XGMAC_KR0_F)
4836 xgmac_intr_handler(adapter, 2);
4837 if (cause & XGMAC_KR1_F)
4838 xgmac_intr_handler(adapter, 3);
4840 pcie_intr_handler(adapter);
4842 mem_intr_handler(adapter, MEM_MC);
4843 if (is_t5(adapter->params.chip) && (cause & MC1_F))
4844 mem_intr_handler(adapter, MEM_MC1);
4846 mem_intr_handler(adapter, MEM_EDC0);
4848 mem_intr_handler(adapter, MEM_EDC1);
4850 le_intr_handler(adapter);
4852 tp_intr_handler(adapter);
4854 ma_intr_handler(adapter);
4855 if (cause & PM_TX_F)
4856 pmtx_intr_handler(adapter);
4857 if (cause & PM_RX_F)
4858 pmrx_intr_handler(adapter);
4859 if (cause & ULP_RX_F)
4860 ulprx_intr_handler(adapter);
4861 if (cause & CPL_SWITCH_F)
4862 cplsw_intr_handler(adapter);
4864 sge_intr_handler(adapter);
4865 if (cause & ULP_TX_F)
4866 ulptx_intr_handler(adapter);
4868 /* Clear the interrupts just processed for which we are the master. */
4869 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
4870 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
4875 * t4_intr_enable - enable interrupts
4876 * @adapter: the adapter whose interrupts should be enabled
4878 * Enable PF-specific interrupts for the calling function and the top-level
4879 * interrupt concentrator for global interrupts. Interrupts are already
4880 * enabled at each module, here we just enable the roots of the interrupt
4883 * Note: this function should be called only when the driver manages
4884 * non PF-specific interrupts from the various HW modules. Only one PCI
4885 * function at a time should be doing this.
4887 void t4_intr_enable(struct adapter *adapter)
4890 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4891 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4892 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4894 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4895 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
4896 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
4897 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
4898 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
4899 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
4900 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
4901 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
4902 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
4903 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
4904 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
4908 * t4_intr_disable - disable interrupts
4909 * @adapter: the adapter whose interrupts should be disabled
4911 * Disable interrupts. We only disable the top-level interrupt
4912 * concentrators. The caller must be a PCI function managing global
4915 void t4_intr_disable(struct adapter *adapter)
4919 if (pci_channel_offline(adapter->pdev))
4922 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4923 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4924 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4926 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
4927 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
4931 * t4_config_rss_range - configure a portion of the RSS mapping table
4932 * @adapter: the adapter
4933 * @mbox: mbox to use for the FW command
4934 * @viid: virtual interface whose RSS subtable is to be written
4935 * @start: start entry in the table to write
4936 * @n: how many table entries to write
4937 * @rspq: values for the response queue lookup table
4938 * @nrspq: number of values in @rspq
4940 * Programs the selected part of the VI's RSS mapping table with the
4941 * provided values. If @nrspq < @n the supplied values are used repeatedly
4942 * until the full table range is populated.
4944 * The caller must ensure the values in @rspq are in the range allowed for
4947 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4948 int start, int n, const u16 *rspq, unsigned int nrspq)
4951 const u16 *rsp = rspq;
4952 const u16 *rsp_end = rspq + nrspq;
4953 struct fw_rss_ind_tbl_cmd cmd;
4955 memset(&cmd, 0, sizeof(cmd));
4956 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
4957 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4958 FW_RSS_IND_TBL_CMD_VIID_V(viid));
4959 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4961 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
4963 int nq = min(n, 32);
4964 __be32 *qp = &cmd.iq0_to_iq2;
4966 cmd.niqid = cpu_to_be16(nq);
4967 cmd.startidx = cpu_to_be16(start);
4975 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
4976 if (++rsp >= rsp_end)
4978 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
4979 if (++rsp >= rsp_end)
4981 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
4982 if (++rsp >= rsp_end)
4985 *qp++ = cpu_to_be32(v);
4989 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4997 * t4_config_glbl_rss - configure the global RSS mode
4998 * @adapter: the adapter
4999 * @mbox: mbox to use for the FW command
5000 * @mode: global RSS mode
5001 * @flags: mode-specific flags
5003 * Sets the global RSS mode.
/* Set the adapter-wide RSS mode via FW_RSS_GLB_CONFIG_CMD.  Only the
 * MANUAL and BASICVIRTUAL modes are handled here, and @flags is used
 * only in BASICVIRTUAL mode (synmapen..hashtoeplitz bits).
 */
5005 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5008 struct fw_rss_glb_config_cmd c;
5010 memset(&c, 0, sizeof(c));
5011 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5012 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5013 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5014 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5015 c.u.manual.mode_pkd =
5016 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5017 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5018 c.u.basicvirtual.mode_pkd =
5019 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5020 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
/* NOTE(review): the else branch rejecting other modes is not visible
 * in this fragment.
 */
5023 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5027 * t4_config_vi_rss - configure per VI RSS settings
5028 * @adapter: the adapter
5029 * @mbox: mbox to use for the FW command
5032 * @defq: id of the default RSS queue for the VI.
5034 * Configures VI-specific RSS properties.
/* Program per-VI RSS properties: OR the caller's @flags with the
 * default-queue id and issue FW_RSS_VI_CONFIG_CMD.  Returns the
 * t4_wr_mbox() status.
 */
5036 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5037 unsigned int flags, unsigned int defq)
5039 struct fw_rss_vi_config_cmd c;
5041 memset(&c, 0, sizeof(c));
5042 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5043 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5044 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5045 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5046 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5047 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5048 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5051 /* Read an RSS table row */
/* Write the row index (with the upper selector bits 0xfff00000 set to
 * request a read) into TP_RSS_LKP_TABLE, then poll the same register
 * until LKPTBLROWVLD asserts; the register value lands in *val.
 * NOTE(review): the remaining t4_wait_op_done_val() arguments
 * (attempts/delay/valp) are not visible in this fragment.
 */
5052 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5054 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
5055 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5060 * t4_read_rss - read the contents of the RSS mapping table
5061 * @adapter: the adapter
5062 * @map: holds the contents of the RSS mapping table
5064 * Reads the contents of the RSS hash->queue mapping table.
/* Dump the RSS hash->queue mapping table into @map.  Each hardware row
 * holds two queue ids (QUEUE0/QUEUE1), hence RSS_NENTRIES / 2 row
 * reads.  NOTE(review): the error check on rd_rss_row()'s return and
 * the final return value are not visible in this fragment.
 */
5066 int t4_read_rss(struct adapter *adapter, u16 *map)
5071 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
5072 ret = rd_rss_row(adapter, i, &val);
5075 *map++ = LKPTBLQUEUE0_G(val);
5076 *map++ = LKPTBLQUEUE1_G(val);
/* Decide whether TP indirect registers should be accessed through the
 * firmware LDST mailbox path: only when the firmware is alive (FW_OK)
 * AND the user has not forced backdoor register access (@use_bd).
 * BUGFIX: the condition used '||', which would attempt firmware mailbox
 * access with dead firmware (FW_OK clear, use_bd clear) and would also
 * override an explicit use_bd request whenever FW_OK was set.
 */
5081 static unsigned int t4_use_ldst(struct adapter *adap)
5083 return (adap->flags & FW_OK) && !adap->use_bd;
5087 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5088 * @adap: the adapter
5089 * @cmd: TP fw ldst address space type
5090 * @vals: where the indirect register values are stored/written
5091 * @nregs: how many indirect registers to read/write
5092 * @start_idx: index of first indirect register to read/write
5093 * @rw: Read (1) or Write (0)
5094 * @sleep_ok: if true we may sleep while awaiting command completion
5096 * Access TP indirect registers through LDST
/* Issue one FW_LDST_CMD per register: address goes in addrval.addr,
 * the value to write (or 0 for reads) in addrval.val.  On a successful
 * read the returned value is copied back into vals[i].  NOTE(review):
 * the READ/WRITE flag selection and the error-exit path are partially
 * elided in this fragment.
 */
5098 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5099 unsigned int nregs, unsigned int start_index,
5100 unsigned int rw, bool sleep_ok)
5104 struct fw_ldst_cmd c;
5106 for (i = 0; i < nregs; i++) {
5107 memset(&c, 0, sizeof(c));
5108 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5110 (rw ? FW_CMD_READ_F :
5112 FW_LDST_CMD_ADDRSPACE_V(cmd));
5113 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5115 c.u.addrval.addr = cpu_to_be32(start_index + i);
5116 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);	/* 0 on reads */
5117 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
/* On a read, the firmware echoes the register value back in addrval.val */
5123 vals[i] = be32_to_cpu(c.u.addrval.val);
5129 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5130 * @adap: the adapter
5131 * @reg_addr: Address Register
5132 * @reg_data: Data register
5133 * @buff: where the indirect register values are stored/written
5134 * @nregs: how many indirect registers to read/write
5135 * @start_index: index of first indirect register to read/write
5136 * @rw: READ(1) or WRITE(0)
5137 * @sleep_ok: if true we may sleep while awaiting command completion
5139 * Read/Write TP indirect registers through LDST if possible.
5140 * Else, use backdoor access
/* Map the indirect address register to the matching FW_LDST address
 * space (TP_PIO / TP_TM_PIO / TP_MIB), then prefer the firmware LDST
 * path when t4_use_ldst() allows it; otherwise fall through to direct
 * backdoor access via t4_read_indirect()/t4_write_indirect().
 * NOTE(review): the switch statement's opening, break statements and
 * the indirect_access label are elided in this fragment.
 */
5142 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5143 u32 *buff, u32 nregs, u32 start_index, int rw,
5151 cmd = FW_LDST_ADDRSPC_TP_PIO;
5153 case TP_TM_PIO_ADDR_A:
5154 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5156 case TP_MIB_INDEX_A:
5157 cmd = FW_LDST_ADDRSPC_TP_MIB;
/* Unknown address register: skip LDST entirely */
5160 goto indirect_access;
5163 if (t4_use_ldst(adap))
5164 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
/* Backdoor path: rw selects read vs write of the indirect registers */
5171 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5174 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5180 * t4_tp_pio_read - Read TP PIO registers
5181 * @adap: the adapter
5182 * @buff: where the indirect register values are written
5183 * @nregs: how many indirect registers to read
5184 * @start_index: index of first indirect register to read
5185 * @sleep_ok: if true we may sleep while awaiting command completion
5187 * Read TP PIO Registers
/* Thin wrapper: read @nregs TP PIO registers starting at @start_index
 * (rw = 1 selects read in t4_tp_indirect_rw()).
 */
5189 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5190 u32 start_index, bool sleep_ok)
5192 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5193 start_index, 1, sleep_ok);
5197 * t4_tp_pio_write - Write TP PIO registers
5198 * @adap: the adapter
5199 * @buff: where the indirect register values are stored
5200 * @nregs: how many indirect registers to write
5201 * @start_index: index of first indirect register to write
5202 * @sleep_ok: if true we may sleep while awaiting command completion
5204 * Write TP PIO Registers
/* Thin wrapper: write @nregs TP PIO registers starting at @start_index
 * (rw = 0 selects write in t4_tp_indirect_rw()).
 */
5206 static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5207 u32 start_index, bool sleep_ok)
5209 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5210 start_index, 0, sleep_ok);
5214 * t4_tp_tm_pio_read - Read TP TM PIO registers
5215 * @adap: the adapter
5216 * @buff: where the indirect register values are written
5217 * @nregs: how many indirect registers to read
5218 * @start_index: index of first indirect register to read
5219 * @sleep_ok: if true we may sleep while awaiting command completion
5221 * Read TP TM PIO Registers
/* Thin wrapper: read @nregs TP TM PIO registers starting at
 * @start_index (rw = 1 selects read).
 */
5223 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5224 u32 start_index, bool sleep_ok)
5226 t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5227 nregs, start_index, 1, sleep_ok);
5231 * t4_tp_mib_read - Read TP MIB registers
5232 * @adap: the adapter
5233 * @buff: where the indirect register values are written
5234 * @nregs: how many indirect registers to read
5235 * @start_index: index of first indirect register to read
5236 * @sleep_ok: if true we may sleep while awaiting command completion
5238 * Read TP MIB Registers
/* Thin wrapper: read @nregs TP MIB counters starting at @start_index
 * (rw = 1 selects read).
 */
5240 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5243 t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5244 start_index, 1, sleep_ok);
5248 * t4_read_rss_key - read the global RSS key
5249 * @adap: the adapter
5250 * @key: 10-entry array holding the 320-bit RSS key
5251 * @sleep_ok: if true we may sleep while awaiting command completion
5253 * Reads the global 320-bit RSS key.
/* Read the 320-bit global RSS secret key as ten consecutive 32-bit
 * TP PIO registers starting at TP_RSS_SECRET_KEY0.
 */
5255 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5257 t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5261 * t4_write_rss_key - program one of the RSS keys
5262 * @adap: the adapter
5263 * @key: 10-entry array holding the 320-bit RSS key
5264 * @idx: which RSS key to write
5265 * @sleep_ok: if true we may sleep while awaiting command completion
5267 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5268 * 0..15 the corresponding entry in the RSS key table is written,
5269 * otherwise the global RSS key is written.
/* Write the 320-bit key into TP_RSS_SECRET_KEY0..9, then, if @idx
 * addresses a per-VF key slot, latch it into the key table via
 * TP_RSS_CONFIG_VRT.  On T6+ with KeyMode 3 the addressable slot count
 * grows from 16 to 32 and the write uses the extended KEYWRADDRX/
 * T6_VFWRADDR encoding.
 */
5271 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5274 u8 rss_key_addr_cnt = 16;	/* default key-table depth */
5275 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5277 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5278 * allows access to key addresses 16-63 by using KeyWrAddrX
5279 * as index[5:4](upper 2) into key table
5281 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5282 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5283 rss_key_addr_cnt = 32;
5285 t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5287 if (idx >= 0 && idx < rss_key_addr_cnt) {
5288 if (rss_key_addr_cnt > 16)
/* Extended addressing: idx[5:4] via KEYWRADDRX, low bits via VFWRADDR */
5289 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5290 KEYWRADDRX_V(idx >> 4) |
5291 T6_VFWRADDR_V(idx) | KEYWREN_F);
5293 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5294 KEYWRADDR_V(idx) | KEYWREN_F);
5299 * t4_read_rss_pf_config - read PF RSS Configuration Table
5300 * @adapter: the adapter
5301 * @index: the entry in the PF RSS table to read
5302 * @valp: where to store the returned value
5303 * @sleep_ok: if true we may sleep while awaiting command completion
5305 * Reads the PF RSS Configuration Table at the specified index and returns
5306 * the value found there.
/* Read one entry of the PF RSS Configuration Table: the table is a run
 * of consecutive TP PIO registers starting at TP_RSS_PF0_CONFIG.
 */
5308 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5309 u32 *valp, bool sleep_ok)
5311 t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
5315 * t4_read_rss_vf_config - read VF RSS Configuration Table
5316 * @adapter: the adapter
5317 * @index: the entry in the VF RSS table to read
5318 * @vfl: where to store the returned VFL
5319 * @vfh: where to store the returned VFH
5320 * @sleep_ok: if true we may sleep while awaiting command completion
5322 * Reads the VF RSS Configuration Table at the specified index and returns
5323 * the (VFL, VFH) values found there.
/* Read one VF RSS Configuration Table entry.  The VF index field moved
 * between chip generations, so pick the T4/T5 VFWRADDR or the T6+
 * T6_VFWRADDR mask/encoding, trigger the table read through
 * TP_RSS_CONFIG_VRT (VFRDEN), then fetch the latched VFL/VFH values.
 */
5325 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5326 u32 *vfl, u32 *vfh, bool sleep_ok)
5328 u32 vrt, mask, data;
5330 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5331 mask = VFWRADDR_V(VFWRADDR_M);
5332 data = VFWRADDR_V(index);
5334 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
5335 data = T6_VFWRADDR_V(index);
5338 /* Request that the index'th VF Table values be read into VFL/VFH.
5340 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
/* Clear stale read/write-enable and index bits before setting ours */
5341 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5342 vrt |= data | VFRDEN_F;
5343 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5345 /* Grab the VFL/VFH values ...
5347 t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5348 t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5352 * t4_read_rss_pf_map - read PF RSS Map
5353 * @adapter: the adapter
5354 * @sleep_ok: if true we may sleep while awaiting command completion
5356 * Reads the PF RSS Map register and returns its value.
/* Read the single TP_RSS_PF_MAP register into a local and return it
 * (the declaration of pfmap and the return are elided here).
 */
5358 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5362 t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5367 * t4_read_rss_pf_mask - read PF RSS Mask
5368 * @adapter: the adapter
5369 * @sleep_ok: if true we may sleep while awaiting command completion
5371 * Reads the PF RSS Mask register and returns its value.
/* Read the single TP_RSS_PF_MSK register into a local and return it
 * (the declaration of pfmask and the return are elided here).
 */
5373 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5377 t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5382 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5383 * @adap: the adapter
5384 * @v4: holds the TCP/IP counter values
5385 * @v6: holds the TCP/IPv6 counter values
5386 * @sleep_ok: if true we may sleep while awaiting command completion
5388 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5389 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
/* Snapshot the TCP MIB counter block once per address family.  The val
 * array spans the contiguous register range OUT_RST..RXT_SEG_LO; the
 * STAT/STAT64 helpers index into it by symbolic offset, with 64-bit
 * counters split across _HI/_LO register pairs.  The v4/v6 reads are
 * guarded by NULL checks on @v4/@v6 (guards elided in this fragment).
 */
5391 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5392 struct tp_tcp_stats *v6, bool sleep_ok)
5394 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
5396 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
5397 #define STAT(x) val[STAT_IDX(x)]
5398 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5401 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5402 TP_MIB_TCP_OUT_RST_A, sleep_ok);
5403 v4->tcp_out_rsts = STAT(OUT_RST);
5404 v4->tcp_in_segs = STAT64(IN_SEG);
5405 v4->tcp_out_segs = STAT64(OUT_SEG);
5406 v4->tcp_retrans_segs = STAT64(RXT_SEG);
/* Same offsets work for IPv6 because the V6 block starts at V6OUT_RST */
5409 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5410 TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
5411 v6->tcp_out_rsts = STAT(OUT_RST);
5412 v6->tcp_in_segs = STAT64(IN_SEG);
5413 v6->tcp_out_segs = STAT64(OUT_SEG);
5414 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5422 * t4_tp_get_err_stats - read TP's error MIB counters
5423 * @adap: the adapter
5424 * @st: holds the counter values
5425 * @sleep_ok: if true we may sleep while awaiting command completion
5427 * Returns the values of TP's error counters.
/* Gather TP error counters: one t4_tp_mib_read() per per-channel
 * counter family (nchan entries each), plus a final 2-register read
 * for the ofld_no_neigh/ofld_cong_defer pair starting at OFD_ARP_DROP.
 */
5429 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5432 int nchan = adap->params.arch.nchan;
5434 t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5436 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5438 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5440 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5441 TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5442 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5443 TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5444 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5446 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5447 TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5448 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5449 TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
5450 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5455 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5456 * @adap: the adapter
5457 * @st: holds the counter values
5458 * @sleep_ok: if true we may sleep while awaiting command completion
5460 * Returns the values of TP's CPL counters.
/* Read per-channel CPL request and response counters from the TP MIB. */
5462 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5465 int nchan = adap->params.arch.nchan;
5467 t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5469 t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5473 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5474 * @adap: the adapter
5475 * @st: holds the counter values
5476 * @sleep_ok: if true we may sleep while awaiting command completion
5478 * Returns the values of TP's RDMA counters.
/* Read the two consecutive RDMA MIB counters (rqe_dfr_pkt and the
 * following register) in one call.
 */
5480 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5483 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5488 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5489 * @adap: the adapter
5490 * @idx: the port index
5491 * @st: holds the counter values
5492 * @sleep_ok: if true we may sleep while awaiting command completion
5494 * Returns the values of TP's FCoE counters for the selected port.
/* Read per-port FCoE MIB counters.  Per-port registers are laid out as
 * base + @idx; the byte counter is a HI/LO pair (2 * idx stride) that
 * is recombined into one 64-bit value.
 */
5496 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5497 struct tp_fcoe_stats *st, bool sleep_ok)
5501 t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5504 t4_tp_mib_read(adap, &st->frames_drop, 1,
5505 TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5507 t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
/* val[0] is the HI word, val[1] the LO word of the byte counter */
5510 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5514 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5515 * @adap: the adapter
5516 * @st: holds the counter values
5517 * @sleep_ok: if true we may sleep while awaiting command completion
5519 * Returns the values of TP's counters for non-TCP directly-placed packets.
/* Read the four consecutive non-TCP DDP (USM) MIB registers in one
 * shot: val[0] = frames, val[2]/val[3] = octets HI/LO (the val[1]
 * drops assignment is elided in this fragment).
 */
5521 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5526 t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5527 st->frames = val[0];
5529 st->octets = ((u64)val[2] << 32) | val[3];
5533 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5534 * @adap: the adapter
5535 * @mtus: where to store the MTU values
5536 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5538 * Reads the HW path MTU table.
/* Walk all NMTUS path-MTU table entries.  Writing MTUINDEX 0xff with
 * the entry number in MTUVALUE selects the row; reading the register
 * back yields the stored MTU (and its log2 width if requested).
 */
5540 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5545 for (i = 0; i < NMTUS; ++i) {
5546 t4_write_reg(adap, TP_MTU_TABLE_A,
5547 MTUINDEX_V(0xff) | MTUVALUE_V(i));
5548 v = t4_read_reg(adap, TP_MTU_TABLE_A);
5549 mtus[i] = MTUVALUE_G(v);
/* Guarded by "if (mtu_log)" — the NULL check line is elided here */
5551 mtu_log[i] = MTUWIDTH_G(v);
5556 * t4_read_cong_tbl - reads the congestion control table
5557 * @adap: the adapter
5558 * @incr: where to store the alpha values
5560 * Reads the additive increments programmed into the HW congestion
/* Read back the congestion-control additive-increment table: for each
 * (mtu, window) cell, write the row selector (ROWINDEX 0xffff plus the
 * packed mtu/window index) and read the low 13 bits of the result.
 */
5563 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5565 unsigned int mtu, w;
5567 for (mtu = 0; mtu < NMTUS; ++mtu)
5568 for (w = 0; w < NCCTRL_WIN; ++w) {
5569 t4_write_reg(adap, TP_CCTRL_TABLE_A,
5570 ROWINDEX_V(0xffff) | (mtu << 5) | w);
5571 incr[mtu][w] = (u16)t4_read_reg(adap,
5572 TP_CCTRL_TABLE_A) & 0x1fff;
5577 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5578 * @adap: the adapter
5579 * @addr: the indirect TP register address
5580 * @mask: specifies the field within the register to modify
5581 * @val: new value for the field
5583 * Sets a field of an indirect TP register to the given value.
/* Read-modify-write of an indirect TP register via the PIO addr/data
 * pair: preserve all bits outside @mask, then write back with @val
 * merged in.  Note this uses the backdoor path unconditionally.
 */
5585 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5586 unsigned int mask, unsigned int val)
5588 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5589 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5590 t4_write_reg(adap, TP_PIO_DATA_A, val);
5594 * init_cong_ctrl - initialize congestion control parameters
5595 * @a: the alpha values for congestion control
5596 * @b: the beta values for congestion control
5598 * Initialize the congestion control parameters.
/* Fill the default alpha (@a) and beta (@b) congestion-control tables.
 * NOTE(review): only the first and the b-table assignments are visible
 * in this fragment; the a[9..] initializers are elided.
 */
5600 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5602 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5627 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5630 b[13] = b[14] = b[15] = b[16] = 3;
5631 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5632 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5637 /* The minimum additive increment value for the congestion control table */
5638 #define CC_MIN_INCR 2U
5641 * t4_load_mtus - write the MTU and congestion control HW tables
5642 * @adap: the adapter
5643 * @mtus: the values for the MTU table
5644 * @alpha: the values for the congestion control alpha parameter
5645 * @beta: the values for the congestion control beta parameter
5647 * Write the HW MTU table with the supplied MTUs and the high-speed
5648 * congestion control table with the supplied alpha, beta, and MTUs.
5649 * We write the two tables together because the additive increments
5650 * depend on the MTUs.
/* Program the HW MTU table and, for each MTU, the per-window additive
 * increments of the congestion-control table.  fls() gives the MTU's
 * log2 width, rounded down when the MTU is closer to the lower power
 * of two (the log2-- line is elided in this fragment).
 */
5652 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5653 const unsigned short *alpha, const unsigned short *beta)
5655 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5656 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5657 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5658 28672, 40960, 57344, 81920, 114688, 163840, 229376
5663 for (i = 0; i < NMTUS; ++i) {
5664 unsigned int mtu = mtus[i];
5665 unsigned int log2 = fls(mtu);
5667 if (!(mtu & ((1 << log2) >> 2))) /* round */
5669 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5670 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
5672 for (w = 0; w < NCCTRL_WIN; ++w) {
/* Increment scales (mtu - 40) payload by alpha over the window's
 * average packet count, floored at CC_MIN_INCR.
 */
5675 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5678 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5679 (w << 16) | (beta[w] << 13) | inc);
5684 /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5685 * clocks. The formula is
5687 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5689 * which is equivalent to
5691 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
/* Convert 256-byte units per 4K core clocks into bytes/s:
 * 62.5 * bytes256 * cclk(kHz), computed in integer math as
 * v * 62 + v / 2 to avoid floating point.
 */
5693 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5695 u64 v = bytes256 * adap->params.vpd.cclk;
5697 return v * 62 + v / 2;
5701 * t4_get_chan_txrate - get the current per channel Tx rates
5702 * @adap: the adapter
5703 * @nic_rate: rates for NIC traffic
5704 * @ofld_rate: rates for offloaded traffic
5706 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
/* Read the per-channel NIC (TRATE) and offload (ORATE) Tx rate
 * registers and convert each field to bytes/s.  Channels 2 and 3 exist
 * only on adapters with the full NCHAN channel count.
 */
5709 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5713 v = t4_read_reg(adap, TP_TX_TRATE_A);
5714 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5715 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5716 if (adap->params.arch.nchan == NCHAN) {
5717 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5718 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5721 v = t4_read_reg(adap, TP_TX_ORATE_A);
5722 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5723 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5724 if (adap->params.arch.nchan == NCHAN) {
5725 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5726 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5731 * t4_set_trace_filter - configure one of the tracing filters
5732 * @adap: the adapter
5733 * @tp: the desired trace filter parameters
5734 * @idx: which filter to configure
5735 * @enable: whether to enable or disable the filter
5737 * Configures one of the tracing filters available in HW. If @enable is
5738 * %0 @tp is not examined and may be %NULL. The user is responsible to
5739 * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
/* Program MPS trace filter @idx.  Disable path: clear MATCH_CTL_A and
 * return (early-return line elided).  Enable path: validate snap_len
 * against the FIFO budget for the current single/multi-trace mode,
 * range-check the filter parameters, stop the tracer, write the 
 * match/don't-care words, then arm the filter with the T4 or T5+
 * control-bit layout.
 */
5741 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5742 int idx, int enable)
5744 int i, ofst = idx * 4;
5745 u32 data_reg, mask_reg, cfg;
5746 u32 multitrc = TRCMULTIFILTER_F;
/* Disable request: clearing MATCH_CTL_A turns the tracer off */
5749 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5753 cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5754 if (cfg & TRCMULTIFILTER_F) {
5755 /* If multiple tracers are enabled, then maximum
5756 * capture size is 2.5KB (FIFO size of a single channel)
5757 * minus 2 flits for CPL_TRACE_PKT header.
/* 10 * 1024 / 4 = 2560 bytes (2.5KB), minus two 8-byte flits */
5759 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5762 /* If multiple tracers are disabled, to avoid deadlocks
5763 * maximum packet capture size of 9600 bytes is recommended.
5764 * Also in this mode, only trace0 can be enabled and running.
5767 if (tp->snap_len > 9600 || idx)
/* T4 exposes port ids up to 11; T5+ up to 19 */
5771 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5772 tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5773 tp->min_len > TFMINPKTSIZE_M)
5776 /* stop the tracer we'll be changing */
5777 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5779 idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5780 data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5781 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
/* HW stores don't-care bits, so the caller's mask is inverted */
5783 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5784 t4_write_reg(adap, data_reg, tp->data[i]);
5785 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5787 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5788 TFCAPTUREMAX_V(tp->snap_len) |
5789 TFMINPKTSIZE_V(tp->min_len));
5790 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5791 TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5792 (is_t4(adap->params.chip) ?
5793 TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
5794 T5_TFPORT_V(tp->port) | T5_TFEN_F |
5795 T5_TFINVERTMATCH_V(tp->invert)));
5801 * t4_get_trace_filter - query one of the tracing filters
5802 * @adap: the adapter
5803 * @tp: the current trace filter parameters
5804 * @idx: which trace filter to query
5805 * @enabled: non-zero if the filter is enabled
5807 * Returns the current settings of one of the HW tracing filters.
/* Read back the settings of MPS trace filter @idx: decode the control
 * words with the T4 or T5+ field layout, then recover the match data
 * and mask (HW stores don't-care bits, so the mask is re-inverted and
 * the data is masked to the cared-about bits).
 */
5809 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5813 int i, ofst = idx * 4;
5814 u32 data_reg, mask_reg;
5816 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
5817 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
5819 if (is_t4(adap->params.chip)) {
5820 *enabled = !!(ctla & TFEN_F);
5821 tp->port = TFPORT_G(ctla);
5822 tp->invert = !!(ctla & TFINVERTMATCH_F);
5824 *enabled = !!(ctla & T5_TFEN_F);
5825 tp->port = T5_TFPORT_G(ctla);
5826 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
5828 tp->snap_len = TFCAPTUREMAX_G(ctlb);
5829 tp->min_len = TFMINPKTSIZE_G(ctlb);
5830 tp->skip_ofst = TFOFFSET_G(ctla);
5831 tp->skip_len = TFLENGTH_G(ctla);
5833 ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
5834 data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
5835 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
5837 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5838 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5839 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5844 * t4_pmtx_get_stats - returns the HW stats from PMTX
5845 * @adap: the adapter
5846 * @cnt: where to store the count statistics
5847 * @cycles: where to store the cycle statistics
5849 * Returns performance statistics from PMTX.
/* Collect PMTX performance counters.  Selecting stat i+1 in
 * PM_TX_STAT_CONFIG latches it into COUNT/LSB; on T4 the cycle count
 * is a direct 64-bit read, on T5+ it is assembled from two words read
 * through the PM_TX debug indirect registers.
 */
5851 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5856 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
5857 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
5858 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
5859 if (is_t4(adap->params.chip)) {
5860 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
5862 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
5863 PM_TX_DBG_DATA_A, data, 2,
5864 PM_TX_DBG_STAT_MSB_A);
5865 cycles[i] = (((u64)data[0] << 32) | data[1]);
5871 * t4_pmrx_get_stats - returns the HW stats from PMRX
5872 * @adap: the adapter
5873 * @cnt: where to store the count statistics
5874 * @cycles: where to store the cycle statistics
5876 * Returns performance statistics from PMRX.
/* Collect PMRX performance counters — mirror image of
 * t4_pmtx_get_stats() using the PM_RX register set.
 */
5878 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5883 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
5884 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
5885 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
5886 if (is_t4(adap->params.chip)) {
5887 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
5889 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
5890 PM_RX_DBG_DATA_A, data, 2,
5891 PM_RX_DBG_STAT_MSB_A);
5892 cycles[i] = (((u64)data[0] << 32) | data[1]);
5898 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
5899 * @adap: the adapter
5900 * @pidx: the port index
5902 * Computes and returns a bitmap indicating which MPS buffer groups are
5903 * associated with the given Port. Bit i is set if buffer group i is
/* Hard-coded fallback mapping of port index -> MPS buffer-group bitmap,
 * keyed by chip generation and port count.  NOTE(review): the switch
 * headers, several case labels (e.g. the 1-port cases) and returns are
 * elided in this fragment; only representative T4/T5 and T6 cases and
 * the unsupported-configuration error path are visible.
 */
5906 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
5909 unsigned int chip_version, nports;
5911 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
5912 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
5914 switch (chip_version) {
/* T4/T5: 2 ports use two BGs each, 4 ports use one BG each */
5919 case 2: return 3 << (2 * pidx);
5920 case 4: return 1 << pidx;
/* T6: 2 ports, one BG each, spaced two apart */
5926 case 2: return 1 << (2 * pidx);
5931 dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
5932 chip_version, nports);
5938 * t4_get_mps_bg_map - return the buffer groups associated with a port
5939 * @adapter: the adapter
5940 * @pidx: the port index
5942 * Returns a bitmap indicating which MPS buffer groups are associated
5943 * with the given Port. Bit i is set if buffer group i is used by the
/* Return (and cache) the MPS buffer-group bitmap for @pidx.  Order of
 * preference: cached value in adapter->params.mps_bg_map, then the
 * firmware's FW_PARAMS_PARAM_DEV_MPSBGMAP parameter (one byte per
 * port), then the compute_mps_bg_map() hardcoded fallback.
 */
5946 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
5949 unsigned int nports;
5951 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
5952 if (pidx >= nports) {
5953 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
5958 /* If we've already retrieved/computed this, just return the result.
5960 mps_bg_map = adapter->params.mps_bg_map;
5961 if (mps_bg_map[pidx])
5962 return mps_bg_map[pidx];
5964 /* Newer Firmware can tell us what the MPS Buffer Group Map is.
5965 * If we're talking to such Firmware, let it tell us. If the new
5966 * API isn't supported, revert back to old hardcoded way. The value
5967 * obtained from Firmware is encoded in below format:
5969 * val = (( MPSBGMAP[Port 3] << 24 ) |
5970 * ( MPSBGMAP[Port 2] << 16 ) |
5971 * ( MPSBGMAP[Port 1] << 8 ) |
5972 * ( MPSBGMAP[Port 0] << 0 ))
5974 if (adapter->flags & FW_OK) {
5978 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5979 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
/* NOTE(review): "¶m" below appears to be a mojibake of "&param"
 * introduced by text extraction — confirm against the pristine file.
 */
5980 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
5981 0, 1, ¶m, &val);
5985 /* Store the BG Map for all of the Ports in order to
5986 * avoid more calls to the Firmware in the future.
5988 for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
5989 mps_bg_map[p] = val & 0xff;
5991 return mps_bg_map[pidx];
5995 /* Either we're not talking to the Firmware or we're dealing with
5996 * older Firmware which doesn't support the new API to get the MPS
5997 * Buffer Group Map. Fall back to computing it ourselves.
5999 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
6000 return mps_bg_map[pidx];
6004 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6005 * @adapter: the adapter
6006 * @pidx: the port index
6008 * Returns a bitmap indicating which TP Ingress Channels are associated
6009 * with a given Port. Bit i is set if TP Ingress Channel i is used by
/* Map port index -> TP ingress-channel bitmap, keyed by chip version
 * and port count.  Intentionally duplicates the T4/T5 values of the
 * MPS buffer-group map (see the in-code comment) because the concepts
 * are distinct.  NOTE(review): switch headers, some case labels and
 * returns are elided in this fragment.
 */
6012 unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6014 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6015 unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6017 if (pidx >= nports) {
6018 dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
6023 switch (chip_version) {
6026 /* Note that this happens to be the same values as the MPS
6027 * Buffer Group Map for these Chips. But we replicate the code
6028 * here because they're really separate concepts.
6032 case 2: return 3 << (2 * pidx);
6033 case 4: return 1 << pidx;
/* T6: one TP ingress channel per port */
6039 case 2: return 1 << pidx;
6044 dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6045 chip_version, nports);
6050 * t4_get_port_type_description - return Port Type string description
6051 * @port_type: firmware Port Type enumeration
/* Map a firmware port-type enum to a human-readable string via a
 * static lookup table (table entries and the out-of-range fallback
 * return are elided in this fragment).
 */
6053 const char *t4_get_port_type_description(enum fw_port_type port_type)
6055 static const char *const port_type_description[] = {
6080 if (port_type < ARRAY_SIZE(port_type_description))
6081 return port_type_description[port_type];
6086 * t4_get_port_stats_offset - collect port stats relative to a previous
6088 * @adap: The adapter
6090 * @stats: Current stats to fill
6091 * @offset: Previous stats snapshot
/* Collect current port stats, then subtract a previous snapshot
 * field-by-field by treating both structs as flat u64 arrays
 * (the subtraction statement itself is elided in this fragment).
 */
6093 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6094 struct port_stats *stats,
6095 struct port_stats *offset)
6100 t4_get_port_stats(adap, idx, stats);
6101 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6102 i < (sizeof(struct port_stats) / sizeof(u64));
6108 * t4_get_port_stats - collect port statistics
6109 * @adap: the adapter
6110 * @idx: the port index
6111 * @p: the stats structure to fill
6113 * Collect statistics related to the given port from HW.
/* Read all MPS per-port Tx/Rx statistics for port @idx.  GET_STAT
 * picks the T4 vs T5+ per-port register layout; GET_STAT_COM reads
 * common (buffer-group) registers.  On T5+ the STAT_CTL flags say
 * whether pause frames were folded into the 64B/multicast counters,
 * in which case they are subtracted back out.  Buffer-group overflow/
 * truncate counters are only valid for BGs in this port's bgmap.
 */
6115 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6117 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6118 u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
6120 #define GET_STAT(name) \
6121 t4_read_reg64(adap, \
6122 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
6123 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
6124 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6126 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6127 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6128 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6129 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6130 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6131 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6132 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6133 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6134 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6135 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6136 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6137 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6138 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6139 p->tx_drop = GET_STAT(TX_PORT_DROP);
6140 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6141 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6142 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6143 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6144 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6145 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6146 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6147 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6148 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
/* T5+ may count pause frames in the 64B/mcast buckets; undo that */
6150 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6151 if (stat_ctl & COUNTPAUSESTATTX_F)
6152 p->tx_frames_64 -= p->tx_pause;
6153 if (stat_ctl & COUNTPAUSEMCTX_F)
6154 p->tx_mcast_frames -= p->tx_pause;
6156 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6157 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6158 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6159 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6160 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6161 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6162 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6163 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6164 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6165 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6166 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6167 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6168 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6169 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6170 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6171 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6172 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6173 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6174 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6175 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6176 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6177 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6178 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6179 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6180 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6181 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6182 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Same pause-frame adjustment on the receive side */
6184 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6185 if (stat_ctl & COUNTPAUSESTATRX_F)
6186 p->rx_frames_64 -= p->rx_pause;
6187 if (stat_ctl & COUNTPAUSEMCRX_F)
6188 p->rx_mcast_frames -= p->rx_pause;
/* One bit per buffer group: zero the counters for BGs not ours */
6191 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6192 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6193 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6194 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6195 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6196 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6197 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6198 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
	/* Bitmap of the MPS buffer groups associated with this port; the
	 * per-buffer-group drop/truncate counters below are only meaningful
	 * for groups present in this map.
	 */
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

	/* Read one 64-bit per-port loopback statistic.  The per-port
	 * statistics register block differs between T4 and T5+, hence the
	 * chip-revision check.
	 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
		      (is_t4(adap->params.chip) ? \
		       PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
		       T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
	/* Read one 64-bit statistic from the common (not per-port) MPS block. */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	/* Frame-size histogram counters. */
	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	p->drop = GET_STAT(DROP_FRAMES);

	/* Drop/truncate counters per buffer group; zero for groups that are
	 * not mapped to this port.
	 */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6252 /* t4_mk_filtdelwr - create a delete filter WR
6253 * @ftid: the filter ID
6254 * @wr: the filter work request to populate
6255 * @qid: ingress queue to receive the delete notification
6257 * Creates a filter work request to delete the supplied filter. If @qid is
6258 * negative the delete notification is suppressed.
6260 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6262 memset(wr, 0, sizeof(*wr));
6263 wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6264 wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6265 wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6266 FW_FILTER_WR_NOREPLY_V(qid < 0));
6267 wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
6269 wr->rx_chan_rx_rpl_iq =
6270 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
/* INIT_CMD - fill in the common header of a firmware command.
 * @var: the fw_*_cmd structure being initialized (used as a plain lvalue,
 *	 so do not pass an expression with side effects)
 * @cmd: command-name token, expanded to FW_<cmd>_CMD
 * @rd_wr: direction token, expanded to FW_CMD_<rd_wr>_F (READ or WRITE)
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
/* t4_fwaddrspace_write - write a value into the firmware address space.
 * Issues a FW_LDST command on @mbox; returns 0 on success or a negative
 * errno from the mailbox exchange.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* Target the FIRMWARE address space for this load/store command. */
	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	/* No reply payload is needed, hence the NULL response buffer. */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp)
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* LDST command targeting the MDIO address space. */
	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
					FW_CMD_REQUEST_F | FW_CMD_READ_F |
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
					 FW_LDST_CMD_MMD_V(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	/* Issue the command with a response buffer so the firmware can hand
	 * back the register value in c.u.mdio.rval.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	*valp = be16_to_cpu(c.u.mdio.rval);
/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@val: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val)
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* LDST command targeting the MDIO address space. */
	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
					FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
					 FW_LDST_CMD_MMD_V(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);

	/* No reply payload is needed for a write, hence NULL. */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 *	t4_sge_decode_idma_state - decode the idma state
 *	@adapter: the adapter
 *	@state: the state idma is stuck in
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
	/* Human-readable names for the SGE IDMA state machine, one table per
	 * chip generation (the hardware state encodings differ per chip).
	 */
	static const char * const t4_decode[] = {
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	static const char * const t5_decode[] = {
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	static const char * const t6_decode[] = {
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	/* SGE debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		SGE_DEBUG_DATA_LOW_INDEX_2_A,
		SGE_DEBUG_DATA_LOW_INDEX_3_A,
		SGE_DEBUG_DATA_HIGH_INDEX_10_A,
	const char **sge_idma_decode;
	int sge_idma_decode_nstates;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	switch (chip_version) {
		sge_idma_decode = (const char **)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		sge_idma_decode = (const char **)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		sge_idma_decode = (const char **)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		dev_err(adapter->pdev_dev,
			"Unsupported chip version %d\n", chip_version);

	/* NOTE(review): this if/else re-selects the decode table after the
	 * switch above has already done so, which makes the CHELSIO_T6 case
	 * ineffective (a T6 adapter ends up using t5_decode).  Looks like
	 * leftover pre-switch selection code -- confirm and remove one of
	 * the two selections.
	 */
	if (is_t4(adapter->params.chip)) {
		sge_idma_decode = (const char **)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		sge_idma_decode = (const char **)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);

	/* Print the decoded state name if it is in range, else the raw value. */
	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
/**
 *	t4_sge_ctxt_flush - flush the SGE context cache
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@ctx_type: Egress or Ingress
 *
 *	Issues a FW command through the given mailbox to flush the
 *	SGE context cache.
 */
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* Pick the egress or ingress SGE context address space. */
	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
						 FW_LDST_ADDRSPC_SGE_EGRC :
						 FW_LDST_ADDRSPC_SGE_INGC);
	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
					FW_CMD_REQUEST_F | FW_CMD_READ_F |
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	/* Request the context-flush operation itself. */
	c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
	struct fw_hello_cmd c;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		/* Advertise our own mailbox as Master only when we insist on
		 * being Master; otherwise use the "any mailbox" value.
		 */
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
					mbox : FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
	if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
		t4_report_fw_error(adap);

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	/* Report the device state back to the caller if requested. */
	if (v & FW_HELLO_CMD_ERR_F)
		*state = DEV_STATE_ERR;
	else if (v & FW_HELLO_CMD_INIT_F)
		*state = DEV_STATE_INIT;
		*state = DEV_STATE_UNINIT;

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
				if (pcie_fw & PCIE_FW_ERR_F)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & PCIE_FW_INIT_F)
					*state = DEV_STATE_INIT;

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
6689 * t4_fw_bye - end communication with FW
6690 * @adap: the adapter
6691 * @mbox: mailbox to use for the FW command
6693 * Issues a command to terminate communication with FW.
6695 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6697 struct fw_bye_cmd c;
6699 memset(&c, 0, sizeof(c));
6700 INIT_CMD(c, BYE, WRITE);
6701 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6705 * t4_init_cmd - ask FW to initialize the device
6706 * @adap: the adapter
6707 * @mbox: mailbox to use for the FW command
6709 * Issues a command to FW to partially initialize the device. This
6710 * performs initialization that generally doesn't depend on user input.
6712 int t4_early_init(struct adapter *adap, unsigned int mbox)
6714 struct fw_initialize_cmd c;
6716 memset(&c, 0, sizeof(c));
6717 INIT_CMD(c, INITIALIZE, WRITE);
6718 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6722 * t4_fw_reset - issue a reset to FW
6723 * @adap: the adapter
6724 * @mbox: mailbox to use for the FW command
6725 * @reset: specifies the type of reset to perform
6727 * Issues a reset command of the specified type to FW.
6729 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6731 struct fw_reset_cmd c;
6733 memset(&c, 0, sizeof(c));
6734 INIT_CMD(c, RESET, WRITE);
6735 c.val = cpu_to_be32(reset);
6736 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_M).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 */
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		/* PIO reset with the HALT flag so firmware stops in RESET. */
		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	the chip's microprocessor out of RESET.  This will cause the
 *	firmware to start up from its start vector.  And then we'll loop
 *	until the firmware indicates it's started again (PCIE_FW.HALT
 *	reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	the chip since older firmware won't recognize the PCIE_FW.HALT
 *	flag and automatically RESET itself on startup.
 */
static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
	/*
	 * Since we're directing the RESET instead of the firmware
	 * doing it automatically, we need to clear the PCIE_FW.HALT
	 */
	t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);

	/*
	 * If we've been given a valid mailbox, first try to get the
	 * firmware to do the RESET.  If that works, great and we can
	 * return success.  Otherwise, if we haven't been given a
	 * valid mailbox or the RESET command failed, fall back to
	 * hitting the chip with a hammer.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
		if (t4_fw_reset(adap, mbox,
				PIORST_F | PIORSTMODE_F) == 0)

	/* Fallback: hit the whole chip with a PIO reset directly. */
	t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);

	/* Take the uP out of RESET and poll for PCIE_FW.HALT to clear. */
	t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
		if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size in bytes
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;

	/* Refuse images built for a different chip. */
	if (!t4_fw_matches_chip(adap, fw_hdr))

	/* Disable FW_OK flag so that mbox commands with FW_OK flag set
	 * won't be sent when we are flashing FW.
	 */
	adap->flags &= ~FW_OK;

	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)

	ret = t4_load_fw(adap, fw_data, size);

	/*
	 * If there was a Firmware Configuration File stored in FLASH,
	 * there's a good chance that it won't be compatible with the new
	 * Firmware.  In order to prevent difficult to diagnose adapter
	 * initialization issues, we clear out the Firmware Configuration File
	 * portion of the FLASH .  The user will need to re-FLASH a new
	 * Firmware Configuration File which is compatible with the new
	 * Firmware if that's desired.
	 */
	(void)t4_load_cfg(adap, NULL, 0);

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	ret = t4_fw_restart(adap, mbox, reset);

	/* Grab potentially new Firmware Device Log parameters so we can see
	 * how healthy the new Firmware is.  It's okay to contact the new
	 * Firmware for these parameters even though, as far as it's
	 * concerned, we've never said "HELLO" to it ...
	 */
	(void)t4_init_devlog_params(adap);

	/* Re-enable mailbox commands now that flashing is complete. */
	adap->flags |= FW_OK;
/**
 *	t4_fl_pkt_align - return the fl packet alignment
 *	@adap: the adapter
 *
 *	T4 has a single field to specify the packing and padding boundary.
 *	T5 onwards has separate fields for this and hence the alignment for
 *	next packet offset is maximum of these two.
 */
int t4_fl_pkt_align(struct adapter *adap)
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = t4_read_reg(adap, SGE_CONTROL_A);

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16 bytes).)
	 * Padding Boundary values in T6 starts from 8B,
	 * where as it is 32B for T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;

	/* Decode the register field into a byte boundary. */
	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adap->params.chip)) {
		/* T5 has a weird interpretation of one of the PCIe Packing
		 * Boundary values.  No idea why ...
		 */
		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
		/* The INGPACKBOUNDARY_16B_X field value means 16 bytes, not
		 * the power-of-two decoding used for the other values.
		 */
		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
			ingpackboundary = 16;
			ingpackboundary = 1 << (ingpackboundary +
						INGPACKBOUNDARY_SHIFT_X);

		/* Alignment is the larger of the pad and pack boundaries. */
		fl_align = max(ingpadboundary, ingpackboundary);
/**
 *	t4_fixup_host_params - fix up host-dependent parameters
 *	@adap: the adapter
 *	@page_size: the host's Base Page Size
 *	@cache_line_size: the host's Cache Line Size
 *
 *	Various registers in T4 contain values which are dependent on the
 *	host's Base Page and Cache Line Sizes.  This function will fix all of
 *	those registers with the appropriate values as passed in ...
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
	unsigned int page_shift = fls(page_size) - 1;
	/* SGE encodes the host page size as log2(bytes) - 10. */
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* Clamp the free-list alignment to at least 32 bytes. */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the same host page size for all eight PFs. */
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
		     HOSTPAGESIZEPF0_V(sge_hps) |
		     HOSTPAGESIZEPF1_V(sge_hps) |
		     HOSTPAGESIZEPF2_V(sge_hps) |
		     HOSTPAGESIZEPF3_V(sge_hps) |
		     HOSTPAGESIZEPF4_V(sge_hps) |
		     HOSTPAGESIZEPF5_V(sge_hps) |
		     HOSTPAGESIZEPF6_V(sge_hps) |
		     HOSTPAGESIZEPF7_V(sge_hps));

	if (is_t4(adap->params.chip)) {
		/* T4: a single field controls both padding and packing. */
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(fl_align_log -
						  INGPADBOUNDARY_SHIFT_X) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
			unsigned int mps, mps_log;

			/* The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 */
			pci_read_config_word(adap->pdev,
					     pcie_cap + PCI_EXP_DEVCTL,
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			/* Grow the packing boundary to at least the MPS. */
			if (mps > pack_align)

		/* N.B. T5/T6 have a crazy special interpretation of the "0"
		 * value for the Packing Boundary.  This corresponds to 16
		 * bytes instead of the expected 32 bytes.  So if we want 32
		 * bytes, the best we can really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = INGPACKBOUNDARY_16B_X;
		} else if (pack_align == 32) {
			ingpack = INGPACKBOUNDARY_64B_X;
			unsigned int pack_align_log = fls(pack_align) - 1;
			ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
			fl_align = pack_align;

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = INGPADBOUNDARY_32B_X;
			ingpad = T6_INGPADBOUNDARY_8B_X;

		/* Program the separate pad and pack boundaries. */
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(ingpad) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(ingpack));

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 *
	 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
	/* Round the 1500/9000-byte MTU buffer sizes up to fl_align. */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)

	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
7145 * t4_fw_initialize - ask FW to initialize the device
7146 * @adap: the adapter
7147 * @mbox: mailbox to use for the FW command
7149 * Issues a command to FW to partially initialize the device. This
7150 * performs initialization that generally doesn't depend on user input.
7152 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7154 struct fw_initialize_cmd c;
7156 memset(&c, 0, sizeof(c));
7157 INIT_CMD(c, INITIALIZE, WRITE);
7158 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 *	t4_query_params_rw - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@rw: Write and read flag
 *	@sleep_ok: if true, we may sleep awaiting mbox cmd completion
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw, bool sleep_ok)
	struct fw_params_cmd c;
	/* Walk the command's param array as a flat stream of big-endian
	 * words: each entry is a (mnem, val) pair.
	 */
	__be32 *p = &c.param[0].mnem;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_READ_F |
				  FW_PARAMS_CMD_PFN_V(pf) |
				  FW_PARAMS_CMD_VFN_V(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Load the parameter mnemonics (and, for read-modify-write use,
	 * the caller-supplied values) into the command.
	 */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
			*p = cpu_to_be32(*(val + i));

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		/* Copy the returned values back out, skipping the mnemonics. */
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
7208 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7209 unsigned int vf, unsigned int nparams, const u32 *params,
7212 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7216 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7217 unsigned int vf, unsigned int nparams, const u32 *params,
7220 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7225 * t4_set_params_timeout - sets FW or device parameters
7226 * @adap: the adapter
7227 * @mbox: mailbox to use for the FW command
7230 * @nparams: the number of parameters
7231 * @params: the parameter names
7232 * @val: the parameter values
7233 * @timeout: the timeout time
7235 * Sets the value of FW or device parameters. Up to 7 parameters can be
7236 * specified at once.
7238 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7239 unsigned int pf, unsigned int vf,
7240 unsigned int nparams, const u32 *params,
7241 const u32 *val, int timeout)
7243 struct fw_params_cmd c;
7244 __be32 *p = &c.param[0].mnem;
7249 memset(&c, 0, sizeof(c));
7250 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7251 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7252 FW_PARAMS_CMD_PFN_V(pf) |
7253 FW_PARAMS_CMD_VFN_V(vf));
7254 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7257 *p++ = cpu_to_be32(*params++);
7258 *p++ = cpu_to_be32(*val++);
7261 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7265 * t4_set_params - sets FW or device parameters
7266 * @adap: the adapter
7267 * @mbox: mailbox to use for the FW command
7270 * @nparams: the number of parameters
7271 * @params: the parameter names
7272 * @val: the parameter values
7274 * Sets the value of FW or device parameters. Up to 7 parameters can be
7275 * specified at once.
7277 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7278 unsigned int vf, unsigned int nparams, const u32 *params,
7281 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7282 FW_CMD_MAX_TIMEOUT);
7286 * t4_cfg_pfvf - configure PF/VF resource limits
7287 * @adap: the adapter
7288 * @mbox: mailbox to use for the FW command
7289 * @pf: the PF being configured
7290 * @vf: the VF being configured
7291 * @txq: the max number of egress queues
7292 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7293 * @rxqi: the max number of interrupt-capable ingress queues
7294 * @rxq: the max number of interruptless ingress queues
7295 * @tc: the PCI traffic class
7296 * @vi: the max number of virtual interfaces
7297 * @cmask: the channel access rights mask for the PF/VF
7298 * @pmask: the port access rights mask for the PF/VF
7299 * @nexact: the maximum number of exact MPS filters
7300 * @rcaps: read capabilities
7301 * @wxcaps: write/execute capabilities
7303 * Configures resource limits and capabilities for a physical or virtual
7306 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7307 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7308 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7309 unsigned int vi, unsigned int cmask, unsigned int pmask,
7310 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7312 struct fw_pfvf_cmd c;
7314 memset(&c, 0, sizeof(c));
7315 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7316 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7317 FW_PFVF_CMD_VFN_V(vf));
7318 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7319 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7320 FW_PFVF_CMD_NIQ_V(rxq));
7321 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7322 FW_PFVF_CMD_PMASK_V(pmask) |
7323 FW_PFVF_CMD_NEQ_V(txq));
7324 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7325 FW_PFVF_CMD_NVI_V(vi) |
7326 FW_PFVF_CMD_NEXACTF_V(nexact));
7327 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7328 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7329 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7330 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7334 * t4_alloc_vi - allocate a virtual interface
7335 * @adap: the adapter
7336 * @mbox: mailbox to use for the FW command
7337 * @port: physical port associated with the VI
7338 * @pf: the PF owning the VI
7339 * @vf: the VF owning the VI
7340 * @nmac: number of MAC addresses needed (1 to 5)
7341 * @mac: the MAC addresses of the VI
7342 * @rss_size: size of RSS table slice associated with this VI
7344 * Allocates a virtual interface for the given physical port. If @mac is
7345 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7346 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7347 * stored consecutively so the space needed is @nmac * 6 bytes.
7348 * Returns a negative error number or the non-negative VI id.
7350 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7351 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7352 unsigned int *rss_size)
7357 memset(&c, 0, sizeof(c));
7358 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7359 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7360 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7361 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
7362 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
7365 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7370 memcpy(mac, c.mac, sizeof(c.mac));
7373 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7375 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7377 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7379 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7383 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7384 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
7388 * t4_free_vi - free a virtual interface
7389 * @adap: the adapter
7390 * @mbox: mailbox to use for the FW command
7391 * @pf: the PF owning the VI
7392 * @vf: the VF owning the VI
7393 * @viid: virtual interface identifiler
7395 * Free a previously allocated virtual interface.
7397 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7398 unsigned int vf, unsigned int viid)
7402 memset(&c, 0, sizeof(c));
7403 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7406 FW_VI_CMD_PFN_V(pf) |
7407 FW_VI_CMD_VFN_V(vf));
7408 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7409 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7411 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7415 * t4_set_rxmode - set Rx properties of a virtual interface
7416 * @adap: the adapter
7417 * @mbox: mailbox to use for the FW command
7419 * @mtu: the new MTU or -1
7420 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7421 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7422 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7423 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7424 * @sleep_ok: if true we may sleep while awaiting command completion
7426 * Sets Rx properties of a virtual interface.
7428 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7429 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7432 struct fw_vi_rxmode_cmd c;
7434 /* convert to FW values */
7436 mtu = FW_RXMODE_MTU_NO_CHG;
7438 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7440 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7442 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7444 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7446 memset(&c, 0, sizeof(c));
7447 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7448 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7449 FW_VI_RXMODE_CMD_VIID_V(viid));
7450 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7452 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7453 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7454 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7455 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7456 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
7457 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7461 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7462 * @adap: the adapter
7463 * @mbox: mailbox to use for the FW command
7465 * @free: if true any existing filters for this VI id are first removed
7466 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7467 * @addr: the MAC address(es)
7468 * @idx: where to store the index of each allocated filter
7469 * @hash: pointer to hash address filter bitmap
7470 * @sleep_ok: call is allowed to sleep
7472 * Allocates an exact-match filter for each of the supplied addresses and
7473 * sets it to the corresponding address. If @idx is not %NULL it should
7474 * have at least @naddr entries, each of which will be set to the index of
7475 * the filter allocated for the corresponding MAC address. If a filter
7476 * could not be allocated for an address its index is set to 0xffff.
7477 * If @hash is not %NULL addresses that fail to allocate an exact filter
7478 * are hashed and update the hash filter bitmap pointed at by @hash.
7480 * Returns a negative error number or the number of filters allocated.
7482 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7483 unsigned int viid, bool free, unsigned int naddr,
7484 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7486 int offset, ret = 0;
7487 struct fw_vi_mac_cmd c;
7488 unsigned int nfilters = 0;
7489 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
7490 unsigned int rem = naddr;
7492 if (naddr > max_naddr)
7495 for (offset = 0; offset < naddr ; /**/) {
7496 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
7497 rem : ARRAY_SIZE(c.u.exact));
7498 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7499 u.exact[fw_naddr]), 16);
7500 struct fw_vi_mac_exact *p;
7503 memset(&c, 0, sizeof(c));
7504 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7507 FW_CMD_EXEC_V(free) |
7508 FW_VI_MAC_CMD_VIID_V(viid));
7509 c.freemacs_to_len16 =
7510 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
7511 FW_CMD_LEN16_V(len16));
7513 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7515 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7516 FW_VI_MAC_CMD_IDX_V(
7517 FW_VI_MAC_ADD_MAC));
7518 memcpy(p->macaddr, addr[offset + i],
7519 sizeof(p->macaddr));
7522 /* It's okay if we run out of space in our MAC address arena.
7523 * Some of the addresses we submit may get stored so we need
7524 * to run through the reply to see what the results were ...
7526 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7527 if (ret && ret != -FW_ENOMEM)
7530 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7531 u16 index = FW_VI_MAC_CMD_IDX_G(
7532 be16_to_cpu(p->valid_to_idx));
7535 idx[offset + i] = (index >= max_naddr ?
7537 if (index < max_naddr)
7541 hash_mac_addr(addr[offset + i]));
7549 if (ret == 0 || ret == -FW_ENOMEM)
7555 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
7556 * @adap: the adapter
7557 * @mbox: mailbox to use for the FW command
7559 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7560 * @addr: the MAC address(es)
7561 * @sleep_ok: call is allowed to sleep
7563 * Frees the exact-match filter for each of the supplied addresses
7565 * Returns a negative error number or the number of filters freed.
7567 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
7568 unsigned int viid, unsigned int naddr,
7569 const u8 **addr, bool sleep_ok)
7571 int offset, ret = 0;
7572 struct fw_vi_mac_cmd c;
7573 unsigned int nfilters = 0;
7574 unsigned int max_naddr = is_t4(adap->params.chip) ?
7575 NUM_MPS_CLS_SRAM_L_INSTANCES :
7576 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7577 unsigned int rem = naddr;
7579 if (naddr > max_naddr)
7582 for (offset = 0; offset < (int)naddr ; /**/) {
7583 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7585 : ARRAY_SIZE(c.u.exact));
7586 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7587 u.exact[fw_naddr]), 16);
7588 struct fw_vi_mac_exact *p;
7591 memset(&c, 0, sizeof(c));
7592 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7596 FW_VI_MAC_CMD_VIID_V(viid));
7597 c.freemacs_to_len16 =
7598 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7599 FW_CMD_LEN16_V(len16));
7601 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
7602 p->valid_to_idx = cpu_to_be16(
7603 FW_VI_MAC_CMD_VALID_F |
7604 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
7605 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7608 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7612 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7613 u16 index = FW_VI_MAC_CMD_IDX_G(
7614 be16_to_cpu(p->valid_to_idx));
7616 if (index < max_naddr)
7630 * t4_change_mac - modifies the exact-match filter for a MAC address
7631 * @adap: the adapter
7632 * @mbox: mailbox to use for the FW command
7634 * @idx: index of existing filter for old value of MAC address, or -1
7635 * @addr: the new MAC address value
7636 * @persist: whether a new MAC allocation should be persistent
7637 * @add_smt: if true also add the address to the HW SMT
7639 * Modifies an exact-match filter and sets it to the new MAC address.
7640 * Note that in general it is not possible to modify the value of a given
7641 * filter so the generic way to modify an address filter is to free the one
7642 * being used by the old address value and allocate a new filter for the
7643 * new address value. @idx can be -1 if the address is a new addition.
7645 * Returns a negative error number or the index of the filter with the new
7648 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7649 int idx, const u8 *addr, bool persist, bool add_smt)
7652 struct fw_vi_mac_cmd c;
7653 struct fw_vi_mac_exact *p = c.u.exact;
7654 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
7656 if (idx < 0) /* new allocation */
7657 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7658 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7660 memset(&c, 0, sizeof(c));
7661 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7662 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7663 FW_VI_MAC_CMD_VIID_V(viid));
7664 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
7665 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7666 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
7667 FW_VI_MAC_CMD_IDX_V(idx));
7668 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7670 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7672 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7673 if (ret >= max_mac_addr)
7680 * t4_set_addr_hash - program the MAC inexact-match hash filter
7681 * @adap: the adapter
7682 * @mbox: mailbox to use for the FW command
7684 * @ucast: whether the hash filter should also match unicast addresses
7685 * @vec: the value to be written to the hash filter
7686 * @sleep_ok: call is allowed to sleep
7688 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7690 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7691 bool ucast, u64 vec, bool sleep_ok)
7693 struct fw_vi_mac_cmd c;
7695 memset(&c, 0, sizeof(c));
7696 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7697 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7698 FW_VI_ENABLE_CMD_VIID_V(viid));
7699 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
7700 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
7702 c.u.hash.hashvec = cpu_to_be64(vec);
7703 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7707 * t4_enable_vi_params - enable/disable a virtual interface
7708 * @adap: the adapter
7709 * @mbox: mailbox to use for the FW command
7711 * @rx_en: 1=enable Rx, 0=disable Rx
7712 * @tx_en: 1=enable Tx, 0=disable Tx
7713 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7715 * Enables/disables a virtual interface. Note that setting DCB Enable
7716 * only makes sense when enabling a Virtual Interface ...
7718 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7719 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7721 struct fw_vi_enable_cmd c;
7723 memset(&c, 0, sizeof(c));
7724 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7725 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7726 FW_VI_ENABLE_CMD_VIID_V(viid));
7727 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
7728 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
7729 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
7731 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7735 * t4_enable_vi - enable/disable a virtual interface
7736 * @adap: the adapter
7737 * @mbox: mailbox to use for the FW command
7739 * @rx_en: 1=enable Rx, 0=disable Rx
7740 * @tx_en: 1=enable Tx, 0=disable Tx
7742 * Enables/disables a virtual interface.
7744 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7745 bool rx_en, bool tx_en)
7747 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7751 * t4_identify_port - identify a VI's port by blinking its LED
7752 * @adap: the adapter
7753 * @mbox: mailbox to use for the FW command
7755 * @nblinks: how many times to blink LED at 2.5 Hz
7757 * Identifies a VI's port by blinking its LED.
7759 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7760 unsigned int nblinks)
7762 struct fw_vi_enable_cmd c;
7764 memset(&c, 0, sizeof(c));
7765 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7766 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7767 FW_VI_ENABLE_CMD_VIID_V(viid));
7768 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
7769 c.blinkdur = cpu_to_be16(nblinks);
7770 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7774 * t4_iq_stop - stop an ingress queue and its FLs
7775 * @adap: the adapter
7776 * @mbox: mailbox to use for the FW command
7777 * @pf: the PF owning the queues
7778 * @vf: the VF owning the queues
7779 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7780 * @iqid: ingress queue id
7781 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7782 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7784 * Stops an ingress queue and its associated FLs, if any. This causes
7785 * any current or future data/messages destined for these queues to be
7788 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7789 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7790 unsigned int fl0id, unsigned int fl1id)
7794 memset(&c, 0, sizeof(c));
7795 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7796 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7797 FW_IQ_CMD_VFN_V(vf));
7798 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
7799 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7800 c.iqid = cpu_to_be16(iqid);
7801 c.fl0id = cpu_to_be16(fl0id);
7802 c.fl1id = cpu_to_be16(fl1id);
7803 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7807 * t4_iq_free - free an ingress queue and its FLs
7808 * @adap: the adapter
7809 * @mbox: mailbox to use for the FW command
7810 * @pf: the PF owning the queues
7811 * @vf: the VF owning the queues
7812 * @iqtype: the ingress queue type
7813 * @iqid: ingress queue id
7814 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7815 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7817 * Frees an ingress queue and its associated FLs, if any.
7819 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7820 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7821 unsigned int fl0id, unsigned int fl1id)
7825 memset(&c, 0, sizeof(c));
7826 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7827 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7828 FW_IQ_CMD_VFN_V(vf));
7829 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
7830 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7831 c.iqid = cpu_to_be16(iqid);
7832 c.fl0id = cpu_to_be16(fl0id);
7833 c.fl1id = cpu_to_be16(fl1id);
7834 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7838 * t4_eth_eq_free - free an Ethernet egress queue
7839 * @adap: the adapter
7840 * @mbox: mailbox to use for the FW command
7841 * @pf: the PF owning the queue
7842 * @vf: the VF owning the queue
7843 * @eqid: egress queue id
7845 * Frees an Ethernet egress queue.
7847 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7848 unsigned int vf, unsigned int eqid)
7850 struct fw_eq_eth_cmd c;
7852 memset(&c, 0, sizeof(c));
7853 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
7854 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7855 FW_EQ_ETH_CMD_PFN_V(pf) |
7856 FW_EQ_ETH_CMD_VFN_V(vf));
7857 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
7858 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
7859 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7863 * t4_ctrl_eq_free - free a control egress queue
7864 * @adap: the adapter
7865 * @mbox: mailbox to use for the FW command
7866 * @pf: the PF owning the queue
7867 * @vf: the VF owning the queue
7868 * @eqid: egress queue id
7870 * Frees a control egress queue.
7872 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7873 unsigned int vf, unsigned int eqid)
7875 struct fw_eq_ctrl_cmd c;
7877 memset(&c, 0, sizeof(c));
7878 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
7879 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7880 FW_EQ_CTRL_CMD_PFN_V(pf) |
7881 FW_EQ_CTRL_CMD_VFN_V(vf));
7882 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
7883 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
7884 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7888 * t4_ofld_eq_free - free an offload egress queue
7889 * @adap: the adapter
7890 * @mbox: mailbox to use for the FW command
7891 * @pf: the PF owning the queue
7892 * @vf: the VF owning the queue
7893 * @eqid: egress queue id
7895 * Frees a control egress queue.
7897 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7898 unsigned int vf, unsigned int eqid)
7900 struct fw_eq_ofld_cmd c;
7902 memset(&c, 0, sizeof(c));
7903 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
7904 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7905 FW_EQ_OFLD_CMD_PFN_V(pf) |
7906 FW_EQ_OFLD_CMD_VFN_V(vf));
7907 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
7908 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
7909 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 *	NOTE(review): the "Link Down"/"Remote Fault"/"Reserved" entries were
 *	reconstructed from the firmware's LINKDNRC encoding — confirm against
 *	t4fw_api.h.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	/* Out-of-range codes get a catch-all string rather than an OOB read. */
	if (link_down_rc >= sizeof(reason) / sizeof(reason[0]))
		return "Bad Reason Code";

	return reason[link_down_rc];
}
7939 * Return the highest speed set in the port capabilities, in Mb/s.
7941 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
7943 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
7945 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
7949 TEST_SPEED_RETURN(400G, 400000);
7950 TEST_SPEED_RETURN(200G, 200000);
7951 TEST_SPEED_RETURN(100G, 100000);
7952 TEST_SPEED_RETURN(50G, 50000);
7953 TEST_SPEED_RETURN(40G, 40000);
7954 TEST_SPEED_RETURN(25G, 25000);
7955 TEST_SPEED_RETURN(10G, 10000);
7956 TEST_SPEED_RETURN(1G, 1000);
7957 TEST_SPEED_RETURN(100M, 100);
7959 #undef TEST_SPEED_RETURN
7965 * fwcap_to_fwspeed - return highest speed in Port Capabilities
7966 * @acaps: advertised Port Capabilities
7968 * Get the highest speed for the port from the advertised Port
7969 * Capabilities. It will be either the highest speed from the list of
7970 * speeds or whatever user has set using ethtool.
7972 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
7974 #define TEST_SPEED_RETURN(__caps_speed) \
7976 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
7977 return FW_PORT_CAP32_SPEED_##__caps_speed; \
7980 TEST_SPEED_RETURN(400G);
7981 TEST_SPEED_RETURN(200G);
7982 TEST_SPEED_RETURN(100G);
7983 TEST_SPEED_RETURN(50G);
7984 TEST_SPEED_RETURN(40G);
7985 TEST_SPEED_RETURN(25G);
7986 TEST_SPEED_RETURN(10G);
7987 TEST_SPEED_RETURN(1G);
7988 TEST_SPEED_RETURN(100M);
7990 #undef TEST_SPEED_RETURN
7996 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
7997 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
7999 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8000 * 32-bit Port Capabilities value.
8002 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
8004 fw_port_cap32_t linkattr = 0;
8006 /* Unfortunately the format of the Link Status in the old
8007 * 16-bit Port Information message isn't the same as the
8008 * 16-bit Port Capabilities bitfield used everywhere else ...
8010 if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8011 linkattr |= FW_PORT_CAP32_FC_RX;
8012 if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8013 linkattr |= FW_PORT_CAP32_FC_TX;
8014 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8015 linkattr |= FW_PORT_CAP32_SPEED_100M;
8016 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8017 linkattr |= FW_PORT_CAP32_SPEED_1G;
8018 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8019 linkattr |= FW_PORT_CAP32_SPEED_10G;
8020 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8021 linkattr |= FW_PORT_CAP32_SPEED_25G;
8022 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8023 linkattr |= FW_PORT_CAP32_SPEED_40G;
8024 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8025 linkattr |= FW_PORT_CAP32_SPEED_100G;
8031 * t4_handle_get_port_info - process a FW reply message
8032 * @pi: the port info
8033 * @rpl: start of the FW message
8035 * Processes a GET_PORT_INFO FW reply message.
8037 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8039 const struct fw_port_cmd *cmd = (const void *)rpl;
8040 int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
8041 struct adapter *adapter = pi->adapter;
8042 struct link_config *lc = &pi->link_cfg;
8043 int link_ok, linkdnrc;
8044 enum fw_port_type port_type;
8045 enum fw_port_module_type mod_type;
8046 unsigned int speed, fc, fec;
8047 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8049 /* Extract the various fields from the Port Information message.
8052 case FW_PORT_ACTION_GET_PORT_INFO: {
8053 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8055 link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8056 linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8057 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8058 mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8059 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8060 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8061 lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8062 linkattr = lstatus_to_fwcap(lstatus);
8066 case FW_PORT_ACTION_GET_PORT_INFO32: {
8069 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8070 link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8071 linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8072 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8073 mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8074 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8075 acaps = be32_to_cpu(cmd->u.info32.acaps32);
8076 lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8077 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8082 dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8083 be32_to_cpu(cmd->action_to_len16));
8087 fec = fwcap_to_cc_fec(acaps);
8088 fc = fwcap_to_cc_pause(linkattr);
8089 speed = fwcap_to_speed(linkattr);
8091 if (mod_type != pi->mod_type) {
8092 /* With the newer SFP28 and QSFP28 Transceiver Module Types,
8093 * various fundamental Port Capabilities which used to be
8094 * immutable can now change radically. We can now have
8095 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8096 * all change based on what Transceiver Module is inserted.
8097 * So we need to record the Physical "Port" Capabilities on
8098 * every Transceiver Module change.
8102 /* When a new Transceiver Module is inserted, the Firmware
8103 * will examine its i2c EPROM to determine its type and
8104 * general operating parameters including things like Forward
8105 * Error Control, etc. Various IEEE 802.3 standards dictate
8106 * how to interpret these i2c values to determine default
8107 * "sutomatic" settings. We record these for future use when
8108 * the user explicitly requests these standards-based values.
8110 lc->def_acaps = acaps;
8112 /* Some versions of the early T6 Firmware "cheated" when
8113 * handling different Transceiver Modules by changing the
8114 * underlaying Port Type reported to the Host Drivers. As
8115 * such we need to capture whatever Port Type the Firmware
8116 * sends us and record it in case it's different from what we
8117 * were told earlier. Unfortunately, since Firmware is
8118 * forever, we'll need to keep this code here forever, but in
8119 * later T6 Firmware it should just be an assignment of the
8120 * same value already recorded.
8122 pi->port_type = port_type;
8124 pi->mod_type = mod_type;
8125 t4_os_portmod_changed(adapter, pi->port_id);
8128 if (link_ok != lc->link_ok || speed != lc->speed ||
8129 fc != lc->fc || fec != lc->fec) { /* something changed */
8130 if (!link_ok && lc->link_ok) {
8131 lc->link_down_rc = linkdnrc;
8132 dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
8133 pi->tx_chan, t4_link_down_rc_str(linkdnrc));
8135 lc->link_ok = link_ok;
8140 lc->lpacaps = lpacaps;
8141 lc->acaps = acaps & ADVERT_MASK;
8143 if (lc->acaps & FW_PORT_CAP32_ANEG) {
8144 lc->autoneg = AUTONEG_ENABLE;
8146 /* When Autoneg is disabled, user needs to set
8148 * Similar to cxgb4_ethtool.c: set_link_ksettings
8151 lc->speed_caps = fwcap_to_fwspeed(acaps);
8152 lc->autoneg = AUTONEG_DISABLE;
8155 t4_os_link_changed(adapter, pi->port_id, link_ok);
8160 * t4_update_port_info - retrieve and update port information if changed
8161 * @pi: the port_info
8163 * We issue a Get Port Information Command to the Firmware and, if
8164 * successful, we check to see if anything is different from what we
8165 * last recorded and update things accordingly.
8167 int t4_update_port_info(struct port_info *pi)
8169 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8170 struct fw_port_cmd port_cmd;
8173 memset(&port_cmd, 0, sizeof(port_cmd));
8174 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8175 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8176 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8177 port_cmd.action_to_len16 = cpu_to_be32(
8178 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8179 ? FW_PORT_ACTION_GET_PORT_INFO
8180 : FW_PORT_ACTION_GET_PORT_INFO32) |
8181 FW_LEN16(port_cmd));
8182 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8183 &port_cmd, sizeof(port_cmd), &port_cmd);
8187 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8192 * t4_get_link_params - retrieve basic link parameters for given port
8194 * @link_okp: value return pointer for link up/down
8195 * @speedp: value return pointer for speed (Mb/s)
8196 * @mtup: value return pointer for mtu
8198 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8199 * and MTU for a specified port. A negative error is returned on
8200 * failure; 0 on success.
8202 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8203 unsigned int *speedp, unsigned int *mtup)
8205 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8206 struct fw_port_cmd port_cmd;
8207 unsigned int action, link_ok, speed, mtu;
8208 fw_port_cap32_t linkattr;
8211 memset(&port_cmd, 0, sizeof(port_cmd));
8212 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8213 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8214 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8215 action = (fw_caps == FW_CAPS16
8216 ? FW_PORT_ACTION_GET_PORT_INFO
8217 : FW_PORT_ACTION_GET_PORT_INFO32);
8218 port_cmd.action_to_len16 = cpu_to_be32(
8219 FW_PORT_CMD_ACTION_V(action) |
8220 FW_LEN16(port_cmd));
8221 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8222 &port_cmd, sizeof(port_cmd), &port_cmd);
8226 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8227 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8229 link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8230 linkattr = lstatus_to_fwcap(lstatus);
8231 mtu = be16_to_cpu(port_cmd.u.info.mtu);
8234 be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8236 link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8237 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8238 mtu = FW_PORT_CMD_MTU32_G(
8239 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8241 speed = fwcap_to_speed(linkattr);
8243 *link_okp = link_ok;
8244 *speedp = fwcap_to_speed(linkattr);
8251 * t4_handle_fw_rpl - process a FW reply message
8252 * @adap: the adapter
8253 * @rpl: start of the FW message
8255 * Processes a FW message, such as link state change messages.
8257 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8259 u8 opcode = *(const u8 *)rpl;
8261 /* This might be a port command ... this simplifies the following
8262 * conditionals ... We can get away with pre-dereferencing
8263 * action_to_len16 because it's in the first 16 bytes and all messages
8264 * will be at least that long.
8266 const struct fw_port_cmd *p = (const void *)rpl;
8267 unsigned int action =
8268 FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
8270 if (opcode == FW_PORT_CMD &&
8271 (action == FW_PORT_ACTION_GET_PORT_INFO ||
8272 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8274 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
8275 struct port_info *pi = NULL;
8277 for_each_port(adap, i) {
8278 pi = adap2pinfo(adap, i);
8279 if (pi->tx_chan == chan)
8283 t4_handle_get_port_info(pi, rpl);
8285 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8292 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8296 if (pci_is_pcie(adapter->pdev)) {
8297 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8298 p->speed = val & PCI_EXP_LNKSTA_CLS;
8299 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8304 * init_link_config - initialize a link's SW state
8305 * @lc: pointer to structure holding the link state
8306 * @pcaps: link Port Capabilities
8307 * @acaps: link current Advertised Port Capabilities
8309 * Initializes the SW state maintained for each link, including the link's
8310 * capabilities and default speed/flow-control/autonegotiation settings.
8312 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8313 fw_port_cap32_t acaps)
8316 lc->def_acaps = acaps;
8320 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8322 /* For Forward Error Control, we default to whatever the Firmware
8323 * tells us the Link is currently advertising.
8325 lc->requested_fec = FEC_AUTO;
8326 lc->fec = fwcap_to_cc_fec(lc->def_acaps);
8328 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8329 lc->acaps = lc->pcaps & ADVERT_MASK;
8330 lc->autoneg = AUTONEG_ENABLE;
8331 lc->requested_fc |= PAUSE_AUTONEG;
8334 lc->autoneg = AUTONEG_DISABLE;
8338 #define CIM_PF_NOACCESS 0xeeeeeeee
8340 int t4_wait_dev_ready(void __iomem *regs)
8344 whoami = readl(regs + PL_WHOAMI_A);
8345 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8349 whoami = readl(regs + PL_WHOAMI_A);
8350 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
8354 u32 vendor_and_model_id;
8358 static int t4_get_flash_params(struct adapter *adap)
8360 /* Table for non-Numonix supported flash parts. Numonix parts are left
8361 * to the preexisting code. All flash parts have 64KB sectors.
8363 static struct flash_desc supported_flash[] = {
8364 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8367 unsigned int part, manufacturer;
8368 unsigned int density, size;
8372 /* Issue a Read ID Command to the Flash part. We decode supported
8373 * Flash parts and their sizes from this. There's a newer Query
8374 * Command which can retrieve detailed geometry information but many
8375 * Flash parts don't support it.
8378 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
8380 ret = sf1_read(adap, 3, 0, 1, &flashid);
8381 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
8385 /* Check to see if it's one of our non-standard supported Flash parts.
8387 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8388 if (supported_flash[part].vendor_and_model_id == flashid) {
8389 adap->params.sf_size = supported_flash[part].size_mb;
8390 adap->params.sf_nsec =
8391 adap->params.sf_size / SF_SEC_SIZE;
8395 /* Decode Flash part size. The code below looks repetative with
8396 * common encodings, but that's not guaranteed in the JEDEC
8397 * specification for the Read JADEC ID command. The only thing that
8398 * we're guaranteed by the JADEC specification is where the
8399 * Manufacturer ID is in the returned result. After that each
8400 * Manufacturer ~could~ encode things completely differently.
8401 * Note, all Flash parts must have 64KB sectors.
8403 manufacturer = flashid & 0xff;
8404 switch (manufacturer) {
8405 case 0x20: { /* Micron/Numonix */
8406 /* This Density -> Size decoding table is taken from Micron
8409 density = (flashid >> 16) & 0xff;
8411 case 0x14: /* 1MB */
8414 case 0x15: /* 2MB */
8417 case 0x16: /* 4MB */
8420 case 0x17: /* 8MB */
8423 case 0x18: /* 16MB */
8426 case 0x19: /* 32MB */
8429 case 0x20: /* 64MB */
8432 case 0x21: /* 128MB */
8435 case 0x22: /* 256MB */
8440 dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
8446 case 0xc2: { /* Macronix */
8447 /* This Density -> Size decoding table is taken from Macronix
8450 density = (flashid >> 16) & 0xff;
8452 case 0x17: /* 8MB */
8455 case 0x18: /* 16MB */
8459 dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
8465 case 0xef: { /* Winbond */
8466 /* This Density -> Size decoding table is taken from Winbond
8469 density = (flashid >> 16) & 0xff;
8471 case 0x17: /* 8MB */
8474 case 0x18: /* 16MB */
8478 dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
8485 dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
8490 /* Store decoded Flash size and fall through into vetting code. */
8491 adap->params.sf_size = size;
8492 adap->params.sf_nsec = size / SF_SEC_SIZE;
8495 if (adap->params.sf_size < FLASH_MIN_SIZE)
8496 dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
8497 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
8501 static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
8506 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
8508 pci_read_config_word(adapter->pdev,
8509 pcie_cap + PCI_EXP_DEVCTL2, &val);
8510 val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
8512 pci_write_config_word(adapter->pdev,
8513 pcie_cap + PCI_EXP_DEVCTL2, val);
8518 * t4_prep_adapter - prepare SW and HW for operation
8519 * @adapter: the adapter
8520 * @reset: if true perform a HW reset
8522 * Initialize adapter SW state for the various HW modules, set initial
8523 * values for some adapter tunables, take PHYs out of reset, and
8524 * initialize the MDIO interface.
8526 int t4_prep_adapter(struct adapter *adapter)
8532 get_pci_mode(adapter, &adapter->params.pci);
8533 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
8535 ret = t4_get_flash_params(adapter);
8537 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
8541 /* Retrieve adapter's device ID
8543 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
8544 ver = device_id >> 12;
8545 adapter->params.chip = 0;
8548 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
8549 adapter->params.arch.sge_fl_db = DBPRIO_F;
8550 adapter->params.arch.mps_tcam_size =
8551 NUM_MPS_CLS_SRAM_L_INSTANCES;
8552 adapter->params.arch.mps_rplc_size = 128;
8553 adapter->params.arch.nchan = NCHAN;
8554 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
8555 adapter->params.arch.vfcount = 128;
8556 /* Congestion map is for 4 channels so that
8557 * MPS can have 4 priority per port.
8559 adapter->params.arch.cng_ch_bits_log = 2;
8562 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
8563 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
8564 adapter->params.arch.mps_tcam_size =
8565 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8566 adapter->params.arch.mps_rplc_size = 128;
8567 adapter->params.arch.nchan = NCHAN;
8568 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
8569 adapter->params.arch.vfcount = 128;
8570 adapter->params.arch.cng_ch_bits_log = 2;
8573 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
8574 adapter->params.arch.sge_fl_db = 0;
8575 adapter->params.arch.mps_tcam_size =
8576 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8577 adapter->params.arch.mps_rplc_size = 256;
8578 adapter->params.arch.nchan = 2;
8579 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
8580 adapter->params.arch.vfcount = 256;
8581 /* Congestion map will be for 2 channels so that
8582 * MPS can have 8 priority per port.
8584 adapter->params.arch.cng_ch_bits_log = 3;
8587 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
8592 adapter->params.cim_la_size = CIMLA_SIZE;
8593 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8596 * Default port for debugging in case we can't reach FW.
8598 adapter->params.nports = 1;
8599 adapter->params.portvec = 1;
8600 adapter->params.vpd.cclk = 50000;
8602 /* Set pci completion timeout value to 4 seconds. */
8603 set_pcie_completion_timeout(adapter, 0xd);
8608 * t4_shutdown_adapter - shut down adapter, host & wire
8609 * @adapter: the adapter
8611 * Perform an emergency shutdown of the adapter and stop it from
8612 * continuing any further communication on the ports or DMA to the
8613 * host. This is typically used when the adapter and/or firmware
8614 * have crashed and we want to prevent any further accidental
8615 * communication with the rest of the world. This will also force
8616 * the port Link Status to go down -- if register writes work --
8617 * which should help our peers figure out that we're down.
8619 int t4_shutdown_adapter(struct adapter *adapter)
8623 t4_intr_disable(adapter);
8624 t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
8625 for_each_port(adapter, port) {
8626 u32 a_port_cfg = is_t4(adapter->params.chip) ?
8627 PORT_REG(port, XGMAC_PORT_CFG_A) :
8628 T5_PORT_REG(port, MAC_PORT_CFG_A);
8630 t4_write_reg(adapter, a_port_cfg,
8631 t4_read_reg(adapter, a_port_cfg)
8632 & ~SIGNAL_DET_V(1));
8634 t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
8640 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
8641 * @adapter: the adapter
8642 * @qid: the Queue ID
8643 * @qtype: the Ingress or Egress type for @qid
8644 * @user: true if this request is for a user mode queue
8645 * @pbar2_qoffset: BAR2 Queue Offset
8646 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
8648 * Returns the BAR2 SGE Queue Registers information associated with the
8649 * indicated Absolute Queue ID. These are passed back in return value
8650 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
8651 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
8653 * This may return an error which indicates that BAR2 SGE Queue
8654 * registers aren't available. If an error is not returned, then the
8655 * following values are returned:
8657 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
8658 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
8660 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
8661 * require the "Inferred Queue ID" ability may be used. E.g. the
8662 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
8663 * then these "Inferred Queue ID" register may not be used.
8665 int t4_bar2_sge_qregs(struct adapter *adapter,
8667 enum t4_bar2_qtype qtype,
8670 unsigned int *pbar2_qid)
8672 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
8673 u64 bar2_page_offset, bar2_qoffset;
8674 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
8676 /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
8677 if (!user && is_t4(adapter->params.chip))
8680 /* Get our SGE Page Size parameters.
8682 page_shift = adapter->params.sge.hps + 10;
8683 page_size = 1 << page_shift;
8685 /* Get the right Queues per Page parameters for our Queue.
8687 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
8688 ? adapter->params.sge.eq_qpp
8689 : adapter->params.sge.iq_qpp);
8690 qpp_mask = (1 << qpp_shift) - 1;
8692 /* Calculate the basics of the BAR2 SGE Queue register area:
8693 * o The BAR2 page the Queue registers will be in.
8694 * o The BAR2 Queue ID.
8695 * o The BAR2 Queue ID Offset into the BAR2 page.
8697 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
8698 bar2_qid = qid & qpp_mask;
8699 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
8701 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
8702 * hardware will infer the Absolute Queue ID simply from the writes to
8703 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
8704 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
8705 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
8706 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
8707 * from the BAR2 Page and BAR2 Queue ID.
8709 * One important censequence of this is that some BAR2 SGE registers
8710 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
8711 * there. But other registers synthesize the SGE Queue ID purely
8712 * from the writes to the registers -- the Write Combined Doorbell
8713 * Buffer is a good example. These BAR2 SGE Registers are only
8714 * available for those BAR2 SGE Register areas where the SGE Absolute
8715 * Queue ID can be inferred from simple writes.
8717 bar2_qoffset = bar2_page_offset;
8718 bar2_qinferred = (bar2_qid_offset < page_size);
8719 if (bar2_qinferred) {
8720 bar2_qoffset += bar2_qid_offset;
8724 *pbar2_qoffset = bar2_qoffset;
8725 *pbar2_qid = bar2_qid;
8730 * t4_init_devlog_params - initialize adapter->params.devlog
8731 * @adap: the adapter
8733 * Initialize various fields of the adapter's Firmware Device Log
8734 * Parameters structure.
8736 int t4_init_devlog_params(struct adapter *adap)
8738 struct devlog_params *dparams = &adap->params.devlog;
8740 unsigned int devlog_meminfo;
8741 struct fw_devlog_cmd devlog_cmd;
8744 /* If we're dealing with newer firmware, the Device Log Paramerters
8745 * are stored in a designated register which allows us to access the
8746 * Device Log even if we can't talk to the firmware.
8749 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
8751 unsigned int nentries, nentries128;
8753 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
8754 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
8756 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
8757 nentries = (nentries128 + 1) * 128;
8758 dparams->size = nentries * sizeof(struct fw_devlog_e);
8763 /* Otherwise, ask the firmware for it's Device Log Parameters.
8765 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
8766 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
8767 FW_CMD_REQUEST_F | FW_CMD_READ_F);
8768 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
8769 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
8775 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
8776 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
8777 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
8778 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
8784 * t4_init_sge_params - initialize adap->params.sge
8785 * @adapter: the adapter
8787 * Initialize various fields of the adapter's SGE Parameters structure.
8789 int t4_init_sge_params(struct adapter *adapter)
8791 struct sge_params *sge_params = &adapter->params.sge;
8793 unsigned int s_hps, s_qpp;
8795 /* Extract the SGE Page Size for our PF.
8797 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
8798 s_hps = (HOSTPAGESIZEPF0_S +
8799 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
8800 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
8802 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
8804 s_qpp = (QUEUESPERPAGEPF0_S +
8805 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
8806 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
8807 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
8808 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
8809 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
8815 * t4_init_tp_params - initialize adap->params.tp
8816 * @adap: the adapter
8817 * @sleep_ok: if true we may sleep while awaiting command completion
8819 * Initialize various fields of the adapter's TP Parameters structure.
8821 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
8826 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
8827 adap->params.tp.tre = TIMERRESOLUTION_G(v);
8828 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
8830 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
8831 for (chan = 0; chan < NCHAN; chan++)
8832 adap->params.tp.tx_modq[chan] = chan;
8834 /* Cache the adapter's Compressed Filter Mode and global Incress
8837 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
8838 TP_VLAN_PRI_MAP_A, sleep_ok);
8839 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
8840 TP_INGRESS_CONFIG_A, sleep_ok);
8842 /* For T6, cache the adapter's compressed error vector
8843 * and passing outer header info for encapsulated packets.
8845 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
8846 v = t4_read_reg(adap, TP_OUT_CONFIG_A);
8847 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
8850 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
8851 * shift positions of several elements of the Compressed Filter Tuple
8852 * for this adapter which we need frequently ...
8854 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
8855 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
8856 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
8857 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
8858 adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
8859 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
8861 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
8863 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
8865 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
8867 adap->params.tp.frag_shift = t4_filter_field_shift(adap,
8870 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
8871 * represents the presence of an Outer VLAN instead of a VNIC ID.
8873 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
8874 adap->params.tp.vnic_shift = -1;
8876 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
8877 adap->params.tp.hash_filter_mask = v;
8878 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
8879 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
8884 * t4_filter_field_shift - calculate filter field shift
8885 * @adap: the adapter
8886 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8888 * Return the shift position of a filter field within the Compressed
8889 * Filter Tuple. The filter field is specified via its selection bit
8890 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
8892 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8894 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
8898 if ((filter_mode & filter_sel) == 0)
8901 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8902 switch (filter_mode & sel) {
8904 field_shift += FT_FCOE_W;
8907 field_shift += FT_PORT_W;
8910 field_shift += FT_VNIC_ID_W;
8913 field_shift += FT_VLAN_W;
8916 field_shift += FT_TOS_W;
8919 field_shift += FT_PROTOCOL_W;
8922 field_shift += FT_ETHERTYPE_W;
8925 field_shift += FT_MACMATCH_W;
8928 field_shift += FT_MPSHITTYPE_W;
8930 case FRAGMENTATION_F:
8931 field_shift += FT_FRAGMENTATION_W;
8938 int t4_init_rss_mode(struct adapter *adap, int mbox)
8941 struct fw_rss_vi_config_cmd rvc;
8943 memset(&rvc, 0, sizeof(rvc));
8945 for_each_port(adap, i) {
8946 struct port_info *p = adap2pinfo(adap, i);
8949 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
8950 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8951 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
8952 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
8953 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
8956 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
8962 * t4_init_portinfo - allocate a virtual interface and initialize port_info
8963 * @pi: the port_info
8964 * @mbox: mailbox to use for the FW command
8965 * @port: physical port associated with the VI
8966 * @pf: the PF owning the VI
8967 * @vf: the VF owning the VI
8968 * @mac: the MAC address of the VI
8970 * Allocates a virtual interface for the given physical port. If @mac is
8971 * not %NULL it contains the MAC address of the VI as assigned by FW.
8972 * @mac should be large enough to hold an Ethernet address.
8973 * Returns < 0 on error.
8975 int t4_init_portinfo(struct port_info *pi, int mbox,
8976 int port, int pf, int vf, u8 mac[])
8978 struct adapter *adapter = pi->adapter;
8979 unsigned int fw_caps = adapter->params.fw_caps_support;
8980 struct fw_port_cmd cmd;
8981 unsigned int rss_size;
8982 enum fw_port_type port_type;
8984 fw_port_cap32_t pcaps, acaps;
8987 /* If we haven't yet determined whether we're talking to Firmware
8988 * which knows the new 32-bit Port Capabilities, it's time to find
8989 * out now. This will also tell new Firmware to send us Port Status
8990 * Updates using the new 32-bit Port Capabilities version of the
8991 * Port Information message.
8993 if (fw_caps == FW_CAPS_UNKNOWN) {
8996 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
8997 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
8999 ret = t4_set_params(adapter, mbox, pf, vf, 1, ¶m, &val);
9000 fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
9001 adapter->params.fw_caps_support = fw_caps;
9004 memset(&cmd, 0, sizeof(cmd));
9005 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
9006 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9007 FW_PORT_CMD_PORTID_V(port));
9008 cmd.action_to_len16 = cpu_to_be32(
9009 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
9010 ? FW_PORT_ACTION_GET_PORT_INFO
9011 : FW_PORT_ACTION_GET_PORT_INFO32) |
9013 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
9017 /* Extract the various fields from the Port Information message.
9019 if (fw_caps == FW_CAPS16) {
9020 u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
9022 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
9023 mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
9024 ? FW_PORT_CMD_MDIOADDR_G(lstatus)
9026 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
9027 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
9029 u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
9031 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
9032 mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
9033 ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
9035 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
9036 acaps = be32_to_cpu(cmd.u.info32.acaps32);
9039 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
9046 pi->rss_size = rss_size;
9048 pi->port_type = port_type;
9049 pi->mdio_addr = mdio_addr;
9050 pi->mod_type = FW_PORT_MOD_TYPE_NA;
9052 init_link_config(&pi->link_cfg, pcaps, acaps);
9056 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9061 for_each_port(adap, i) {
9062 struct port_info *pi = adap2pinfo(adap, i);
9064 while ((adap->params.portvec & (1 << j)) == 0)
9067 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9071 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
9078 * t4_read_cimq_cfg - read CIM queue configuration
9079 * @adap: the adapter
9080 * @base: holds the queue base addresses in bytes
9081 * @size: holds the queue sizes in bytes
9082 * @thres: holds the queue full thresholds in bytes
9084 * Returns the current configuration of the CIM queues, starting with
9085 * the IBQs, then the OBQs.
9087 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9090 int cim_num_obq = is_t4(adap->params.chip) ?
9091 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9093 for (i = 0; i < CIM_NUM_IBQ; i++) {
9094 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9096 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9097 /* value is in 256-byte units */
9098 *base++ = CIMQBASE_G(v) * 256;
9099 *size++ = CIMQSIZE_G(v) * 256;
9100 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
9102 for (i = 0; i < cim_num_obq; i++) {
9103 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9105 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9106 /* value is in 256-byte units */
9107 *base++ = CIMQBASE_G(v) * 256;
9108 *size++ = CIMQSIZE_G(v) * 256;
9113 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9114 * @adap: the adapter
9115 * @qid: the queue index
9116 * @data: where to store the queue contents
9117 * @n: capacity of @data in 32-bit words
9119 * Reads the contents of the selected CIM queue starting at address 0 up
9120 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9121 * error and the number of 32-bit words actually read on success.
9123 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9125 int i, err, attempts;
9127 const unsigned int nwords = CIM_IBQ_SIZE * 4;
9129 if (qid > 5 || (n & 3))
9132 addr = qid * nwords;
9136 /* It might take 3-10ms before the IBQ debug read access is allowed.
9137 * Wait for 1 Sec with a delay of 1 usec.
9141 for (i = 0; i < n; i++, addr++) {
9142 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
9144 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9148 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9150 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
9155 * t4_read_cim_obq - read the contents of a CIM outbound queue
9156 * @adap: the adapter
9157 * @qid: the queue index
9158 * @data: where to store the queue contents
9159 * @n: capacity of @data in 32-bit words
9161 * Reads the contents of the selected CIM queue starting at address 0 up
9162 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9163 * error and the number of 32-bit words actually read on success.
9165 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9168 unsigned int addr, v, nwords;
9169 int cim_num_obq = is_t4(adap->params.chip) ?
9170 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9172 if ((qid > (cim_num_obq - 1)) || (n & 3))
9175 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9176 QUENUMSELECT_V(qid));
9177 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9179 addr = CIMQBASE_G(v) * 64; /* muliple of 256 -> muliple of 4 */
9180 nwords = CIMQSIZE_G(v) * 64; /* same */
9184 for (i = 0; i < n; i++, addr++) {
9185 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9187 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9191 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9193 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
9198 * t4_cim_read - read a block from CIM internal address space
9199 * @adap: the adapter
9200 * @addr: the start address within the CIM address space
9201 * @n: number of words to read
9202 * @valp: where to store the result
9204 * Reads a block of 4-byte words from the CIM intenal address space.
9206 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9211 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9214 for ( ; !ret && n--; addr += 4) {
9215 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9216 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9219 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9225 * t4_cim_write - write a block into CIM internal address space
9226 * @adap: the adapter
9227 * @addr: the start address within the CIM address space
9228 * @n: number of words to write
9229 * @valp: set of values to write
9231 * Writes a block of 4-byte words into the CIM intenal address space.
9233 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9234 const unsigned int *valp)
9238 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9241 for ( ; !ret && n--; addr += 4) {
9242 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9243 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9244 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
/* Convenience wrapper: write a single 4-byte word into the CIM internal
 * address space.
 */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
9257 * t4_cim_read_la - read CIM LA capture buffer
9258 * @adap: the adapter
9259 * @la_buf: where to store the LA data
9260 * @wrptr: the HW write pointer within the capture buffer
9262 * Reads the contents of the CIM LA buffer with the most recent entry at
9263 * the end of the returned data and with the entry at @wrptr first.
9264 * We try to leave the LA in the running state we find it in.
9266 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9269 unsigned int cfg, val, idx;
9271 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
9275 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
9276 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
9281 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9285 idx = UPDBGLAWRPTR_G(val);
9289 for (i = 0; i < adap->params.cim_la_size; i++) {
9290 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9291 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
9294 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9297 if (val & UPDBGLARDEN_F) {
9301 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
9305 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9306 * identify the 32-bit portion of the full 312-bit data
9308 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
9309 idx = (idx & 0xff0) + 0x10;
9312 /* address can't exceed 0xfff */
9313 idx &= UPDBGLARDPTR_M;
9316 if (cfg & UPDBGLAEN_F) {
9317 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9318 cfg & ~UPDBGLARDEN_F);
9326 * t4_tp_read_la - read TP LA capture buffer
9327 * @adap: the adapter
9328 * @la_buf: where to store the LA data
9329 * @wrptr: the HW write pointer within the capture buffer
9331 * Reads the contents of the TP LA buffer with the most recent entry at
9332 * the end of the returned data and with the entry at @wrptr first.
9333 * We leave the LA in the running state we find it in.
/* NOTE(review): this excerpt elides some original lines (braces, the
 * *wrptr store); code lines below are byte-identical to what is visible.
 */
9335 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
9337 bool last_incomplete;
9338 unsigned int i, cfg, val, idx;
/* Snapshot the low 16 config bits; toggling DBGLAENABLE_F freezes the LA
 * while preserving the rest of the configuration.
 */
9340 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
9341 if (cfg & DBGLAENABLE_F) /* freeze LA */
9342 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9343 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
9345 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
9346 idx = DBGLAWPTR_G(val);
/* In capture modes >= 2 the entry at the write pointer may hold only the
 * lower half; if so, step past it and wipe it at the end.
 */
9347 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
9348 if (last_incomplete)
9349 idx = (idx + 1) & DBGLARPTR_M;
/* Clear the read-pointer field and keep the configured LA mask so each
 * loop iteration below only varies the read pointer.
 */
9354 val &= ~DBGLARPTR_V(DBGLARPTR_M);
9355 val |= adap->params.tp.la_mask;
9357 for (i = 0; i < TPLA_SIZE; i++) {
9358 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
9359 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
9360 idx = (idx + 1) & DBGLARPTR_M;
9363 /* Wipe out last entry if it isn't valid */
9364 if (last_incomplete)
9365 la_buf[TPLA_SIZE - 1] = ~0ULL;
9367 if (cfg & DBGLAENABLE_F) /* restore running state */
9368 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9369 cfg | adap->params.tp.la_mask)
9372 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
9373 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
9374 * state for more than the Warning Threshold then we'll issue a warning about
9375 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
9376 * appears to be hung every Warning Repeat second till the situation clears.
9377 * If the situation clears, we'll note that as well.
9379 #define SGE_IDMA_WARN_THRESH 1 /* seconds before the first warning */
9380 #define SGE_IDMA_WARN_REPEAT 300 /* seconds between repeated warnings */
9383 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9384 * @adapter: the adapter
9385 * @idma: the adapter IDMA Monitor state
9387 * Initialize the state of an SGE Ingress DMA Monitor.
9389 void t4_idma_monitor_init(struct adapter *adapter,
9390 struct sge_idma_monitor_state *idma)
9392 /* Initialize the state variables for detecting an SGE Ingress DMA
9393 * hang. The SGE has internal counters which count up on each clock
9394 * tick whenever the SGE finds its Ingress DMA State Engines in the
9395 * same state they were on the previous clock tick. The clock used is
9396 * the Core Clock so we have a limit on the maximum "time" they can
9397 * record; typically a very small number of seconds. For instance,
9398 * with a 600MHz Core Clock, we can only count up to a bit more than
9399 * 7s. So we'll synthesize a larger counter in order to not run the
9400 * risk of having the "timers" overflow and give us the flexibility to
9401 * maintain a Hung SGE State Machine of our own which operates across
9402 * a longer time frame.
/* Threshold in Core Clock ticks corresponding to one second. */
9404 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
/* Per-channel synthesized stall timers start at "not stalled". */
9405 idma->idma_stalled[0] = 0;
9406 idma->idma_stalled[1] = 0;
9410 * t4_idma_monitor - monitor SGE Ingress DMA state
9411 * @adapter: the adapter
9412 * @idma: the adapter IDMA Monitor state
9413 * @hz: number of ticks/second
9414 * @ticks: number of ticks since the last IDMA Monitor call
/* NOTE(review): this excerpt elides several original lines (braces and the
 * continue statements that terminate the "resumed", "first stall", "below
 * threshold" and "warning suppressed" paths); code lines below are
 * byte-identical to what is visible.
 */
9416 void t4_idma_monitor(struct adapter *adapter,
9417 struct sge_idma_monitor_state *idma,
9420 int i, idma_same_state_cnt[2];
9422 /* Read the SGE Debug Ingress DMA Same State Count registers. These
9423 * are counters inside the SGE which count up on each clock when the
9424 * SGE finds its Ingress DMA State Engines in the same states they
9425 * were in the previous clock. The counters will peg out at
9426 * 0xffffffff without wrapping around so once they pass the 1s
9427 * threshold they'll stay above that till the IDMA state changes.
9429 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
9430 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
9431 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
/* Evaluate each of the two IDMA channels independently. */
9433 for (i = 0; i < 2; i++) {
9434 u32 debug0, debug11;
9436 /* If the Ingress DMA Same State Counter ("timer") is less
9437 * than 1s, then we can reset our synthesized Stall Timer and
9438 * continue. If we have previously emitted warnings about a
9439 * potential stalled Ingress Queue, issue a note indicating
9440 * that the Ingress Queue has resumed forward progress.
9442 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
9443 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
9444 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
9445 "resumed after %d seconds\n",
9446 i, idma->idma_qid[i],
9447 idma->idma_stalled[i] / hz);
9448 idma->idma_stalled[i] = 0;
9452 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
9453 * domain. The first time we get here it'll be because we
9454 * passed the 1s Threshold; each additional time it'll be
9455 * because the RX Timer Callback is being fired on its regular
9458 * If the stall is below our Potential Hung Ingress Queue
9459 * Warning Threshold, continue.
9461 if (idma->idma_stalled[i] == 0) {
9462 idma->idma_stalled[i] = hz;
9463 idma->idma_warn[i] = 0;
9465 idma->idma_stalled[i] += ticks;
9466 idma->idma_warn[i] -= ticks;
9469 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
9472 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
9474 if (idma->idma_warn[i] > 0)
9476 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
9478 /* Read and save the SGE IDMA State and Queue ID information.
9479 * We do this every time in case it changes across time ...
9480 * can't be too careful ...
9482 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
9483 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9484 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
9486 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
9487 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9488 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
9490 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
9491 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
9492 i, idma->idma_qid[i], idma->idma_state[i],
9493 idma->idma_stalled[i] / hz,
/* Decode the raw IDMA state into a human-readable log message. */
9495 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
9500 * t4_load_cfg - download config file
9501 * @adap: the adapter
9502 * @cfg_data: the cfg text file to write
9503 * @size: text file size
9505 * Write the supplied config text file to the card's serial flash.
/* NOTE(review): this excerpt elides several original lines — including the
 * assignment of "addr" from cfg_addr and the error-return checks; code
 * lines below are byte-identical to what is visible.
 */
9507 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9509 int ret, i, n, cfg_addr;
9511 unsigned int flash_cfg_start_sec;
9512 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
/* Locate the FLASH region reserved for the Firmware Configuration File. */
9514 cfg_addr = t4_flash_cfg_addr(adap);
9519 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9521 if (size > FLASH_CFG_MAX_SIZE) {
9522 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
9523 FLASH_CFG_MAX_SIZE);
/* Erase every sector the (maximum-size) config region spans before
 * programming; erasing alone (size == 0) clears the stored file.
 */
9527 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
9529 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9530 flash_cfg_start_sec + i - 1);
9531 /* If size == 0 then we're simply erasing the FLASH sectors associated
9532 * with the on-adapter Firmware Configuration File.
9534 if (ret || size == 0)
9537 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9538 for (i = 0; i < size; i += SF_PAGE_SIZE) {
9539 if ((size - i) < SF_PAGE_SIZE)
9543 ret = t4_write_flash(adap, addr, n, cfg_data);
9547 addr += SF_PAGE_SIZE;
9548 cfg_data += SF_PAGE_SIZE;
9553 dev_err(adap->pdev_dev, "config file %s failed %d\n",
9554 (size == 0 ? "clear" : "download"), ret);
9559 * t4_set_vf_mac - Set MAC address for the specified VF
9560 * @adapter: The adapter
9561 * @vf: one of the VFs instantiated by the specified PF
9562 * @naddr: the number of MAC addresses
9563 * @addr: the MAC address(es) to be set to the specified VF
/* NOTE(review): this excerpt elides some original lines (the case labels
 * and break statements of the switch below); code lines are byte-identical
 * to what is visible.
 */
9565 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
9566 unsigned int naddr, u8 *addr)
9568 struct fw_acl_mac_cmd cmd;
9570 memset(&cmd, 0, sizeof(cmd));
/* Build the FW ACL MAC command addressed to the given PF/VF pair. */
9571 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
9574 FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
9575 FW_ACL_MAC_CMD_VFN_V(vf));
9577 /* Note: Do not enable the ACL */
9578 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
/* Each PF owns a different macaddrN slot in the command; pick the one
 * that corresponds to this PF (case labels elided in this excerpt).
 */
9581 switch (adapter->pf) {
9583 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
9586 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
9589 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
9592 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
/* Issue the command and read the FW's reply back into cmd. */
9596 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
9600 * t4_read_pace_tbl - read the pace table
9601 * @adap: the adapter
9602 * @pace_vals: holds the returned values
9604 * Returns the values of TP's pace table in microseconds.
9606 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
/* Indirect-register access: write the index (with the 0xffff0000 read
 * command pattern) then read the selected entry back, converting each
 * entry from DACK ticks to microseconds.
 */
9610 for (i = 0; i < NTX_SCHED; i++) {
9611 t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
9612 v = t4_read_reg(adap, TP_PACE_TABLE_A);
9613 pace_vals[i] = dack_ticks_to_usec(adap, v);
9618 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
9619 * @adap: the adapter
9620 * @sched: the scheduler index
9621 * @kbps: the byte rate in Kbps
9622 * @ipg: the interpacket delay in tenths of nanoseconds
9623 * @sleep_ok: if true we may sleep while awaiting command completion
9625 * Return the current configuration of a HW Tx scheduler.
/* NOTE(review): this excerpt elides some original lines (the odd/even
 * scheduler shift of v and the else branches); code lines below are
 * byte-identical to what is visible.
 */
9627 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
9628 unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
9630 unsigned int v, addr, bpt, cpt;
/* Two schedulers share each rate-limit register, hence sched / 2. */
9633 addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
9634 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
/* bpt = bytes per tick, cpt = clocks per tick (cpt extraction elided). */
9637 bpt = (v >> 8) & 0xff;
9640 *kbps = 0; /* scheduler disabled */
9642 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
9643 *kbps = (v * bpt) / 125;
/* Interpacket gap comes from the timer-separator register, converted
 * from core clock ticks to tenths of nanoseconds.
 */
9647 addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
9648 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9652 *ipg = (10000 * v) / core_ticks_per_usec(adap);
9656 /* t4_sge_ctxt_rd - read an SGE context through FW
9657 * @adap: the adapter
9658 * @mbox: mailbox to use for the FW command
9659 * @cid: the context id
9660 * @ctype: the context type
9661 * @data: where to store the context data
9663 * Issues a FW command through the given mailbox to read an SGE context.
/* NOTE(review): this excerpt elides a few original lines (braces, the
 * early-return on mailbox failure); code lines below are byte-identical
 * to what is visible.
 */
9665 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9666 enum ctxt_type ctype, u32 *data)
9668 struct fw_ldst_cmd c;
/* Select the LDST address space matching the requested context type
 * (free-list manager vs. the other SGE context managers).
 */
9671 if (ctype == CTXT_FLM)
9672 ret = FW_LDST_ADDRSPC_SGE_FLMC;
9674 ret = FW_LDST_ADDRSPC_SGE_CONMC;
9676 memset(&c, 0, sizeof(c));
9677 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
9678 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9679 FW_LDST_CMD_ADDRSPACE_V(ret));
9680 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9681 c.u.idctxt.physid = cpu_to_be32(cid);
/* Issue the read; on success FW fills c with the six context words. */
9683 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9685 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9686 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9687 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9688 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9689 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9690 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9696 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9697 * @adap: the adapter
9698 * @cid: the context id
9699 * @ctype: the context type
9700 * @data: where to store the context data
9702 * Reads an SGE context directly, bypassing FW. This is only for
9703 * debugging when FW is unavailable.
9705 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
9706 enum ctxt_type ctype, u32 *data)
/* Kick off the context read and poll BUSY_F until the SGE completes it. */
9710 t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
9711 ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
/* On success, copy the six 32-bit context data registers out. */
9713 for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
9714 *data++ = t4_read_reg(adap, i);
9718 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9719 int rateunit, int ratemode, int channel, int class,
9720 int minrate, int maxrate, int weight, int pktsize)
9722 struct fw_sched_cmd cmd;
9724 memset(&cmd, 0, sizeof(cmd));
9725 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
9728 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9730 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9731 cmd.u.params.type = type;
9732 cmd.u.params.level = level;
9733 cmd.u.params.mode = mode;
9734 cmd.u.params.ch = channel;
9735 cmd.u.params.cl = class;
9736 cmd.u.params.unit = rateunit;
9737 cmd.u.params.rate = ratemode;
9738 cmd.u.params.min = cpu_to_be32(minrate);
9739 cmd.u.params.max = cpu_to_be32(maxrate);
9740 cmd.u.params.weight = cpu_to_be16(weight);
9741 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9743 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),