2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/delay.h>
38 #include "t4_values.h"
40 #include "t4fw_version.h"
43 * t4_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time
52 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise.
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 int polarity, int attempts, int delay, u32 *valp)
61 u32 val = t4_read_reg(adapter, reg);
63 if (!!(val & mask) == polarity) {
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 int polarity, int attempts, int delay)
78 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
83 * t4_set_reg_field - set a register field to a value
84 * @adapter: the adapter to program
85 * @addr: the register address
86 * @mask: specifies the portion of the register to modify
87 * @val: the new value for the register field
89 * Sets a register field specified by the supplied mask to the
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
95 u32 v = t4_read_reg(adapter, addr) & ~mask;
97 t4_write_reg(adapter, addr, v | val);
98 (void) t4_read_reg(adapter, addr); /* flush */
102 * t4_read_indirect - read indirectly addressed registers
104 * @addr_reg: register holding the indirect address
105 * @data_reg: register holding the value of the indirect register
106 * @vals: where the read register values are stored
107 * @nregs: how many indirect registers to read
108 * @start_idx: index of first indirect register to read
110 * Reads registers that are accessed indirectly through an address/data
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 unsigned int data_reg, u32 *vals,
115 unsigned int nregs, unsigned int start_idx)
118 t4_write_reg(adap, addr_reg, start_idx);
119 *vals++ = t4_read_reg(adap, data_reg);
125 * t4_write_indirect - write indirectly addressed registers
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism. This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses.
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
154 u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
156 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
161 if (is_t4(adap->params.chip))
164 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
167 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168 * Configuration Space read. (None of the other fields matter when
169 * ENABLE is 0 so a simple register write is easier than a
170 * read-modify-write via t4_set_reg_field().)
172 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
176 * t4_report_fw_error - report firmware error
179 * The adapter firmware can indicate error conditions to the host.
180 * If the firmware has indicated an error, print out the reason for
181 * the firmware error.
183 static void t4_report_fw_error(struct adapter *adap)
185 static const char *const reason[] = {
186 "Crash", /* PCIE_FW_EVAL_CRASH */
187 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
188 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
189 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
190 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
192 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193 "Reserved", /* reserved */
197 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198 if (pcie_fw & PCIE_FW_ERR_F) {
199 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200 reason[PCIE_FW_EVAL_G(pcie_fw)]);
201 adap->flags &= ~CXGB4_FW_OK;
206 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
208 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
211 for ( ; nflit; nflit--, mbox_addr += 8)
212 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
216 * Handle a FW assertion reported in a mailbox.
218 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
220 struct fw_debug_cmd asrt;
222 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
223 dev_alert(adap->pdev_dev,
224 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
225 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
226 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
230 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
231 * @adapter: the adapter
232 * @cmd: the Firmware Mailbox Command or Reply
233 * @size: command length in bytes
234 * @access: the time (ms) needed to access the Firmware Mailbox
235 * @execute: the time (ms) the command spent being executed
237 static void t4_record_mbox(struct adapter *adapter,
238 const __be64 *cmd, unsigned int size,
239 int access, int execute)
241 struct mbox_cmd_log *log = adapter->mbox_log;
242 struct mbox_cmd *entry;
245 entry = mbox_cmd_log_entry(log, log->cursor++);
246 if (log->cursor == log->size)
249 for (i = 0; i < size / 8; i++)
250 entry->cmd[i] = be64_to_cpu(cmd[i]);
251 while (i < MBOX_LEN / 8)
253 entry->timestamp = jiffies;
254 entry->seqno = log->seqno++;
255 entry->access = access;
256 entry->execute = execute;
260 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
262 * @mbox: index of the mailbox to use
263 * @cmd: the command to write
264 * @size: command length in bytes
265 * @rpl: where to optionally store the reply
266 * @sleep_ok: if true we may sleep while awaiting command completion
267 * @timeout: time to wait for command to finish before timing out
269 * Sends the given command to FW through the selected mailbox and waits
270 * for the FW to execute the command. If @rpl is not %NULL it is used to
271 * store the FW's reply to the command. The command and its optional
272 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
273 * to respond. @sleep_ok determines whether we may sleep while awaiting
274 * the response. If sleeping is allowed we use progressive backoff
277 * The return value is 0 on success or a negative errno on failure. A
278 * failure can happen either because we are not able to execute the
279 * command or FW executes it but signals an error. In the latter case
280 * the return value is the error code indicated by FW (negated).
282 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
283 int size, void *rpl, bool sleep_ok, int timeout)
285 static const int delay[] = {
286 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
289 struct mbox_list entry;
294 int i, ms, delay_idx, ret;
295 const __be64 *p = cmd;
296 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
297 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
298 __be64 cmd_rpl[MBOX_LEN / 8];
301 if ((size & 15) || size > MBOX_LEN)
305 * If the device is off-line, as in EEH, commands will time out.
306 * Fail them early so we don't waste time waiting.
308 if (adap->pdev->error_state != pci_channel_io_normal)
311 /* If we have a negative timeout, that implies that we can't sleep. */
317 /* Queue ourselves onto the mailbox access list. When our entry is at
318 * the front of the list, we have rights to access the mailbox. So we
319 * wait [for a while] till we're at the front [or bail out with an
322 spin_lock_bh(&adap->mbox_lock);
323 list_add_tail(&entry.list, &adap->mlist.list);
324 spin_unlock_bh(&adap->mbox_lock);
329 for (i = 0; ; i += ms) {
330 /* If we've waited too long, return a busy indication. This
331 * really ought to be based on our initial position in the
332 * mailbox access list but this is a start. We very rarely
333 * contend on access to the mailbox ...
335 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
336 if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
337 spin_lock_bh(&adap->mbox_lock);
338 list_del(&entry.list);
339 spin_unlock_bh(&adap->mbox_lock);
340 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
341 t4_record_mbox(adap, cmd, size, access, ret);
345 /* If we're at the head, break out and start the mailbox
348 if (list_first_entry(&adap->mlist.list, struct mbox_list,
352 /* Delay for a bit before checking again ... */
354 ms = delay[delay_idx]; /* last element may repeat */
355 if (delay_idx < ARRAY_SIZE(delay) - 1)
363 /* Loop trying to get ownership of the mailbox. Return an error
364 * if we can't gain ownership.
366 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
367 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
368 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
369 if (v != MBOX_OWNER_DRV) {
370 spin_lock_bh(&adap->mbox_lock);
371 list_del(&entry.list);
372 spin_unlock_bh(&adap->mbox_lock);
373 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
374 t4_record_mbox(adap, cmd, size, access, ret);
378 /* Copy in the new mailbox command and send it on its way ... */
379 t4_record_mbox(adap, cmd, size, access, 0);
380 for (i = 0; i < size; i += 8)
381 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
383 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
384 t4_read_reg(adap, ctl_reg); /* flush write */
390 !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
394 ms = delay[delay_idx]; /* last element may repeat */
395 if (delay_idx < ARRAY_SIZE(delay) - 1)
401 v = t4_read_reg(adap, ctl_reg);
402 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
403 if (!(v & MBMSGVALID_F)) {
404 t4_write_reg(adap, ctl_reg, 0);
408 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
409 res = be64_to_cpu(cmd_rpl[0]);
411 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
412 fw_asrt(adap, data_reg);
413 res = FW_CMD_RETVAL_V(EIO);
415 memcpy(rpl, cmd_rpl, size);
418 t4_write_reg(adap, ctl_reg, 0);
421 t4_record_mbox(adap, cmd_rpl,
422 MBOX_LEN, access, execute);
423 spin_lock_bh(&adap->mbox_lock);
424 list_del(&entry.list);
425 spin_unlock_bh(&adap->mbox_lock);
426 return -FW_CMD_RETVAL_G((int)res);
430 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
431 t4_record_mbox(adap, cmd, size, access, ret);
432 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
433 *(const u8 *)cmd, mbox);
434 t4_report_fw_error(adap);
435 spin_lock_bh(&adap->mbox_lock);
436 list_del(&entry.list);
437 spin_unlock_bh(&adap->mbox_lock);
442 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
443 void *rpl, bool sleep_ok)
445 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
449 static int t4_edc_err_read(struct adapter *adap, int idx)
451 u32 edc_ecc_err_addr_reg;
454 if (is_t4(adap->params.chip)) {
455 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
458 if (idx != 0 && idx != 1) {
459 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
463 edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
464 rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
467 "edc%d err addr 0x%x: 0x%x.\n",
468 idx, edc_ecc_err_addr_reg,
469 t4_read_reg(adap, edc_ecc_err_addr_reg));
471 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
473 (unsigned long long)t4_read_reg64(adap, rdata_reg),
474 (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
475 (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
476 (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
477 (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
478 (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
479 (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
480 (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
481 (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
487 * t4_memory_rw_init - Get memory window relative offset, base, and size.
489 * @win: PCI-E Memory Window to use
490 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
491 * @mem_off: memory relative offset with respect to @mtype.
492 * @mem_base: configured memory base address.
493 * @mem_aperture: configured memory window aperture.
495 * Get the configured memory window's relative offset, base, and size.
497 int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
498 u32 *mem_base, u32 *mem_aperture)
500 u32 edc_size, mc_size, mem_reg;
502 /* Offset into the region of memory which is being accessed
505 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
506 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
509 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
510 if (mtype == MEM_HMA) {
511 *mem_off = 2 * (edc_size * 1024 * 1024);
512 } else if (mtype != MEM_MC1) {
513 *mem_off = (mtype * (edc_size * 1024 * 1024));
515 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
516 MA_EXT_MEMORY0_BAR_A));
517 *mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
520 /* Each PCI-E Memory Window is programmed with a window size -- or
521 * "aperture" -- which controls the granularity of its mapping onto
522 * adapter memory. We need to grab that aperture in order to know
523 * how to use the specified window. The window is also programmed
524 * with the base address of the Memory Window in BAR0's address
525 * space. For T4 this is an absolute PCI-E Bus Address. For T5
526 * the address is relative to BAR0.
528 mem_reg = t4_read_reg(adap,
529 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
531 /* a dead adapter will return 0xffffffff for PIO reads */
532 if (mem_reg == 0xffffffff)
535 *mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
536 *mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
537 if (is_t4(adap->params.chip))
538 *mem_base -= adap->t4_bar0;
544 * t4_memory_update_win - Move memory window to specified address.
546 * @win: PCI-E Memory Window to use
547 * @addr: location to move.
549 * Move memory window to specified address.
551 void t4_memory_update_win(struct adapter *adap, int win, u32 addr)
554 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
556 /* Read it back to ensure that changes propagate before we
557 * attempt to use the new value.
560 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
564 * t4_memory_rw_residual - Read/Write residual data.
566 * @off: relative offset within residual to start read/write.
567 * @addr: address within indicated memory type.
568 * @buf: host memory buffer
569 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
571 * Read/Write residual data less than 32-bits.
573 void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
583 if (dir == T4_MEMORY_READ) {
584 last.word = le32_to_cpu((__force __le32)
585 t4_read_reg(adap, addr));
586 for (bp = (unsigned char *)buf, i = off; i < 4; i++)
587 bp[i] = last.byte[i];
590 for (i = off; i < 4; i++)
592 t4_write_reg(adap, addr,
593 (__force u32)cpu_to_le32(last.word));
598 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
600 * @win: PCI-E Memory Window to use
601 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
602 * @addr: address within indicated memory type
603 * @len: amount of memory to transfer
604 * @hbuf: host memory buffer
605 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
607 * Reads/writes an [almost] arbitrary memory region in the firmware: the
608 * firmware memory address and host buffer must be aligned on 32-bit
609 * boundaries; the length may be arbitrary. The memory is transferred as
610 * a raw byte sequence from/to the firmware's memory. If this memory
611 * contains data structures which contain multi-byte integers, it's the
612 * caller's responsibility to perform appropriate byte order conversions.
614 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
615 u32 len, void *hbuf, int dir)
617 u32 pos, offset, resid, memoffset;
618 u32 win_pf, mem_aperture, mem_base;
622 /* Argument sanity checks ...
624 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
628 /* It's convenient to be able to handle lengths which aren't a
629 * multiple of 32-bits because we often end up transferring files to
630 * the firmware. So we'll handle that by normalizing the length here
631 * and then handling any residual transfer at the end.
636 ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
641 /* Determine the PCIE_MEM_ACCESS_OFFSET */
642 addr = addr + memoffset;
644 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
646 /* Calculate our initial PCI-E Memory Window Position and Offset into
649 pos = addr & ~(mem_aperture - 1);
652 /* Set up initial PCI-E Memory Window to cover the start of our
655 t4_memory_update_win(adap, win, pos | win_pf);
657 /* Transfer data to/from the adapter as long as there's an integral
658 * number of 32-bit transfers to complete.
660 * A note on Endianness issues:
662 * The "register" reads and writes below from/to the PCI-E Memory
663 * Window invoke the standard adapter Big-Endian to PCI-E Link
664 * Little-Endian "swizzel." As a result, if we have the following
665 * data in adapter memory:
667 * Memory: ... | b0 | b1 | b2 | b3 | ...
668 * Address: i+0 i+1 i+2 i+3
670 * Then a read of the adapter memory via the PCI-E Memory Window
675 * [ b3 | b2 | b1 | b0 ]
677 * If this value is stored into local memory on a Little-Endian system
678 * it will show up correctly in local memory as:
680 * ( ..., b0, b1, b2, b3, ... )
682 * But on a Big-Endian system, the store will show up in memory
683 * incorrectly swizzled as:
685 * ( ..., b3, b2, b1, b0, ... )
687 * So we need to account for this in the reads and writes to the
688 * PCI-E Memory Window below by undoing the register read/write
692 if (dir == T4_MEMORY_READ)
693 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
696 t4_write_reg(adap, mem_base + offset,
697 (__force u32)cpu_to_le32(*buf++));
698 offset += sizeof(__be32);
699 len -= sizeof(__be32);
701 /* If we've reached the end of our current window aperture,
702 * move the PCI-E Memory Window on to the next. Note that
703 * doing this here after "len" may be 0 allows us to set up
704 * the PCI-E Memory Window for a possible final residual
707 if (offset == mem_aperture) {
710 t4_memory_update_win(adap, win, pos | win_pf);
714 /* If the original transfer had a length which wasn't a multiple of
715 * 32-bits, now's where we need to finish off the transfer of the
716 * residual amount. The PCI-E Memory Window has already been moved
717 * above (if necessary) to cover this final transfer.
720 t4_memory_rw_residual(adap, resid, mem_base + offset,
726 /* Return the specified PCI-E Configuration Space register from our Physical
727 * Function. We try first via a Firmware LDST Command since we prefer to let
728 * the firmware own all of these registers, but if that fails we go for it
729 * directly ourselves.
731 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
733 u32 val, ldst_addrspace;
735 /* If fw_attach != 0, construct and send the Firmware LDST Command to
736 * retrieve the specified PCI-E Configuration Space register.
738 struct fw_ldst_cmd ldst_cmd;
741 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
742 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
743 ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
747 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
748 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
749 ldst_cmd.u.pcie.ctrl_to_fn =
750 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
751 ldst_cmd.u.pcie.r = reg;
753 /* If the LDST Command succeeds, return the result, otherwise
754 * fall through to reading it directly ourselves ...
756 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
759 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
761 /* Read the desired Configuration Space register via the PCI-E
762 * Backdoor mechanism.
764 t4_hw_pci_read_cfg4(adap, reg, &val);
768 /* Get the window based on base passed to it.
769 * Window aperture is currently unhandled, but there is no use case for it
772 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
777 if (is_t4(adap->params.chip)) {
780 /* Truncation intentional: we only read the bottom 32-bits of
781 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
782 * mechanism to read BAR0 instead of using
783 * pci_resource_start() because we could be operating from
784 * within a Virtual Machine which is trapping our accesses to
785 * our Configuration Space and we need to set up the PCI-E
786 * Memory Window decoders with the actual addresses which will
787 * be coming across the PCI-E link.
789 bar0 = t4_read_pcie_cfg4(adap, pci_base);
791 adap->t4_bar0 = bar0;
793 ret = bar0 + memwin_base;
795 /* For T5, only relative offset inside the PCIe BAR is passed */
801 /* Get the default utility window (win0) used by everyone */
802 u32 t4_get_util_window(struct adapter *adap)
804 return t4_get_window(adap, PCI_BASE_ADDRESS_0,
805 PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
808 /* Set up memory window for accessing adapter memory ranges. (Read
809 * back MA register to ensure that changes propagate before we attempt
810 * to use the new values.)
812 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
815 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
816 memwin_base | BIR_V(0) |
817 WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
819 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
823 * t4_get_regs_len - return the size of the chips register set
824 * @adapter: the adapter
826 * Returns the size of the chip's BAR0 register space.
828 unsigned int t4_get_regs_len(struct adapter *adapter)
830 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
832 switch (chip_version) {
834 return T4_REGMAP_SIZE;
838 return T5_REGMAP_SIZE;
841 dev_err(adapter->pdev_dev,
842 "Unsupported chip version %d\n", chip_version);
847 * t4_get_regs - read chip registers into provided buffer
849 * @buf: register buffer
850 * @buf_size: size (in bytes) of register buffer
852 * If the provided register buffer isn't large enough for the chip's
853 * full register range, the register dump will be truncated to the
854 * register buffer's size.
856 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
858 static const unsigned int t4_reg_ranges[] = {
1317 static const unsigned int t5_reg_ranges[] = {
2081 static const unsigned int t6_reg_ranges[] = {
2640 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2641 const unsigned int *reg_ranges;
2642 int reg_ranges_size, range;
2643 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2645 /* Select the right set of register ranges to dump depending on the
2646 * adapter chip type.
2648 switch (chip_version) {
2650 reg_ranges = t4_reg_ranges;
2651 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2655 reg_ranges = t5_reg_ranges;
2656 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2660 reg_ranges = t6_reg_ranges;
2661 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2665 dev_err(adap->pdev_dev,
2666 "Unsupported chip version %d\n", chip_version);
2670 /* Clear the register buffer and insert the appropriate register
2671 * values selected by the above register ranges.
2673 memset(buf, 0, buf_size);
2674 for (range = 0; range < reg_ranges_size; range += 2) {
2675 unsigned int reg = reg_ranges[range];
2676 unsigned int last_reg = reg_ranges[range + 1];
2677 u32 *bufp = (u32 *)((char *)buf + reg);
2679 /* Iterate across the register range filling in the register
2680 * buffer but don't write past the end of the register buffer.
2682 while (reg <= last_reg && bufp < buf_end) {
2683 *bufp++ = t4_read_reg(adap, reg);
2689 #define EEPROM_STAT_ADDR 0x7bfc
2690 #define VPD_BASE 0x400
2691 #define VPD_BASE_OLD 0
2692 #define VPD_LEN 1024
2695 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2696 * @phys_addr: the physical EEPROM address
2697 * @fn: the PCI function number
2698 * @sz: size of function-specific area
2700 * Translate a physical EEPROM address to virtual. The first 1K is
2701 * accessed through virtual addresses starting at 31K, the rest is
2702 * accessed through virtual addresses starting at 0.
2704 * The mapping is as follows:
2705 * [0..1K) -> [31K..32K)
2706 * [1K..1K+A) -> [31K-A..31K)
2707 * [1K+A..ES) -> [0..ES-A-1K)
2709 * where A = @fn * @sz, and ES = EEPROM size.
2711 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2714 if (phys_addr < 1024)
2715 return phys_addr + (31 << 10);
2716 if (phys_addr < 1024 + fn)
2717 return 31744 - fn + phys_addr - 1024;
2718 if (phys_addr < EEPROMSIZE)
2719 return phys_addr - 1024 - fn;
2724 * t4_seeprom_wp - enable/disable EEPROM write protection
2725 * @adapter: the adapter
2726 * @enable: whether to enable or disable write protection
2728 * Enables or disables write protection on the serial EEPROM.
2730 int t4_seeprom_wp(struct adapter *adapter, bool enable)
2732 unsigned int v = enable ? 0xc : 0;
2733 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2734 return ret < 0 ? ret : 0;
2738 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2739 * @adapter: adapter to read
2740 * @p: where to store the parameters
2742 * Reads card parameters stored in VPD EEPROM.
2744 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2746 int i, ret = 0, addr;
2748 u8 *vpd, base_val = 0;
2749 unsigned int vpdr_len, kw_offset, id_len;
2751 vpd = vmalloc(VPD_LEN);
2755 /* Card information normally starts at VPD_BASE but early cards had
2758 ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
2762 addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : VPD_BASE_OLD;
2764 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2768 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2769 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
2774 id_len = pci_vpd_lrdt_size(vpd);
2775 if (id_len > ID_LEN)
2778 i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2780 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2785 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2786 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2787 if (vpdr_len + kw_offset > VPD_LEN) {
2788 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
2793 #define FIND_VPD_KW(var, name) do { \
2794 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2796 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2800 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2803 ret = pci_vpd_check_csum(vpd, VPD_LEN);
2805 dev_err(adapter->pdev_dev, "VPD checksum incorrect or missing\n");
2810 FIND_VPD_KW(sn, "SN");
2811 FIND_VPD_KW(pn, "PN");
2812 FIND_VPD_KW(na, "NA");
2815 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2817 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2818 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2820 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2821 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2823 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2824 strim((char *)p->na);
2828 return ret < 0 ? ret : 0;
2832 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2833 * @adapter: adapter to read
2834 * @p: where to store the parameters
2836 * Reads card parameters stored in VPD EEPROM and retrieves the Core
2837 * Clock. This can only be called after a connection to the firmware
2840 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2842 u32 cclk_param, cclk_val;
2845 /* Grab the raw VPD parameters.
2847 ret = t4_get_raw_vpd_params(adapter, p);
2851 /* Ask firmware for the Core Clock since it knows how to translate the
2852 * Reference Clock ('V2') VPD field into a Core Clock value ...
2854 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2855 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2856 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2857 1, &cclk_param, &cclk_val);
2867 * t4_get_pfres - retrieve VF resource limits
2868 * @adapter: the adapter
2870 * Retrieves configured resource limits and capabilities for a physical
2871 * function. The results are stored in @adapter->pfres.
2873 int t4_get_pfres(struct adapter *adapter)
2875 struct pf_resources *pfres = &adapter->params.pfres;
2876 struct fw_pfvf_cmd cmd, rpl;
2880 /* Execute PFVF Read command to get VF resource limits; bail out early
2881 * with error on command failure.
2883 memset(&cmd, 0, sizeof(cmd));
2884 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
2887 FW_PFVF_CMD_PFN_V(adapter->pf) |
2888 FW_PFVF_CMD_VFN_V(0));
2889 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2890 v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
2891 if (v != FW_SUCCESS)
2894 /* Extract PF resource limits and return success.
2896 word = be32_to_cpu(rpl.niqflint_niq);
2897 pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
2898 pfres->niq = FW_PFVF_CMD_NIQ_G(word);
2900 word = be32_to_cpu(rpl.type_to_neq);
2901 pfres->neq = FW_PFVF_CMD_NEQ_G(word);
2902 pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
2904 word = be32_to_cpu(rpl.tc_to_nexactf);
2905 pfres->tc = FW_PFVF_CMD_TC_G(word);
2906 pfres->nvi = FW_PFVF_CMD_NVI_G(word);
2907 pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
2909 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
2910 pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
2911 pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
2912 pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
2917 /* serial flash and firmware constants */
2919 SF_ATTEMPTS = 10, /* max retries for SF operations */
2921 /* flash command opcodes */
2922 SF_PROG_PAGE = 2, /* program page */
2923 SF_WR_DISABLE = 4, /* disable writes */
2924 SF_RD_STATUS = 5, /* read status register */
2925 SF_WR_ENABLE = 6, /* enable writes */
2926 SF_RD_DATA_FAST = 0xb, /* read flash */
2927 SF_RD_ID = 0x9f, /* read ID */
2928 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2932 * sf1_read - read data from the serial flash
2933 * @adapter: the adapter
2934 * @byte_cnt: number of bytes to read
2935 * @cont: whether another operation will be chained
2936 * @lock: whether to lock SF for PL access only
2937 * @valp: where to store the read data
2939 * Reads up to 4 bytes of data from the serial flash. The location of
2940 * the read needs to be specified prior to calling this by issuing the
2941 * appropriate commands to the serial flash.
2943 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2944 int lock, u32 *valp)
2948 if (!byte_cnt || byte_cnt > 4)
2950 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2952 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2953 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2954 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2956 *valp = t4_read_reg(adapter, SF_DATA_A);
2961 * sf1_write - write data to the serial flash
2962 * @adapter: the adapter
2963 * @byte_cnt: number of bytes to write
2964 * @cont: whether another operation will be chained
2965 * @lock: whether to lock SF for PL access only
2966 * @val: value to write
2968 * Writes up to 4 bytes of data to the serial flash. The location of
2969 * the write needs to be specified prior to calling this by issuing the
2970 * appropriate commands to the serial flash.
2972 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2975 if (!byte_cnt || byte_cnt > 4)
2977 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2979 t4_write_reg(adapter, SF_DATA_A, val);
2980 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2981 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2982 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2986 * flash_wait_op - wait for a flash operation to complete
2987 * @adapter: the adapter
2988 * @attempts: max number of polls of the status register
2989 * @delay: delay between polls in ms
2991 * Wait for a flash operation to complete by polling the status register.
2993 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
2999 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3000 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3004 if (--attempts == 0)
3012 * t4_read_flash - read words from serial flash
3013 * @adapter: the adapter
3014 * @addr: the start address for the read
3015 * @nwords: how many 32-bit words to read
3016 * @data: where to store the read data
3017 * @byte_oriented: whether to store data as bytes or as words
3019 * Read the specified number of 32-bit words from the serial flash.
3020 * If @byte_oriented is set the read data is stored as a byte array
3021 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3022 * natural endianness.
3024 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3025 unsigned int nwords, u32 *data, int byte_oriented)
3029 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3032 addr = swab32(addr) | SF_RD_DATA_FAST;
3034 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3035 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3038 for ( ; nwords; nwords--, data++) {
3039 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3041 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3045 *data = (__force __u32)(cpu_to_be32(*data));
3051 * t4_write_flash - write up to a page of data to the serial flash
3052 * @adapter: the adapter
3053 * @addr: the start address to write
3054 * @n: length of data to write in bytes
3055 * @data: the data to write
3056 * @byte_oriented: whether to store data as bytes or as words
3058 * Writes up to a page of data (256 bytes) to the serial flash starting
3059 * at the given address. All the data must be written to the same page.
3060 * If @byte_oriented is set the write data is stored as byte stream
3061 * (i.e. matches what on disk), otherwise in big-endian.
3063 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
3064 unsigned int n, const u8 *data, bool byte_oriented)
3066 unsigned int i, c, left, val, offset = addr & 0xff;
3070 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3073 val = swab32(addr) | SF_PROG_PAGE;
3075 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3076 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3079 for (left = n; left; left -= c, data += c) {
3081 for (val = 0, i = 0; i < c; ++i) {
3083 val = (val << 8) + data[i];
3085 val = (val << 8) + data[c - i - 1];
3088 ret = sf1_write(adapter, c, c != left, 1, val);
3092 ret = flash_wait_op(adapter, 8, 1);
3096 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3098 /* Read the page to verify the write succeeded */
3099 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3104 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3105 dev_err(adapter->pdev_dev,
3106 "failed to correctly write the flash page at %#x\n",
3113 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3118 * t4_get_fw_version - read the firmware version
3119 * @adapter: the adapter
3120 * @vers: where to place the version
3122 * Reads the FW version from flash.
3124 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3126 return t4_read_flash(adapter, FLASH_FW_START +
3127 offsetof(struct fw_hdr, fw_ver), 1,
3132 * t4_get_bs_version - read the firmware bootstrap version
3133 * @adapter: the adapter
3134 * @vers: where to place the version
3136 * Reads the FW Bootstrap version from flash.
3138 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3140 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3141 offsetof(struct fw_hdr, fw_ver), 1,
3146 * t4_get_tp_version - read the TP microcode version
3147 * @adapter: the adapter
3148 * @vers: where to place the version
3150 * Reads the TP microcode version from flash.
3152 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3154 return t4_read_flash(adapter, FLASH_FW_START +
3155 offsetof(struct fw_hdr, tp_microcode_ver),
3160 * t4_get_exprom_version - return the Expansion ROM version (if any)
3161 * @adap: the adapter
3162 * @vers: where to place the version
3164 * Reads the Expansion ROM header from FLASH and returns the version
3165 * number (if present) through the @vers return value pointer. We return
3166 * this in the Firmware Version Format since it's convenient. Return
3167 * 0 on success, -ENOENT if no Expansion ROM is present.
3169 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3171 struct exprom_header {
3172 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3173 unsigned char hdr_ver[4]; /* Expansion ROM version */
3175 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3179 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3180 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3185 hdr = (struct exprom_header *)exprom_header_buf;
3186 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3189 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3190 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3191 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3192 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3197 * t4_get_vpd_version - return the VPD version
3198 * @adapter: the adapter
3199 * @vers: where to place the version
3201 * Reads the VPD via the Firmware interface (thus this can only be called
3202 * once we're ready to issue Firmware commands). The format of the
3203 * VPD version is adapter specific. Returns 0 on success, an error on
3206 * Note that early versions of the Firmware didn't include the ability
3207 * to retrieve the VPD version, so we zero-out the return-value parameter
3208 * in that case to avoid leaving it with garbage in it.
3210 * Also note that the Firmware will return its cached copy of the VPD
3211 * Revision ID, not the actual Revision ID as written in the Serial
3212 * EEPROM. This is only an issue if a new VPD has been written and the
3213 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3214 * to defer calling this routine till after a FW_RESET_CMD has been issued
3215 * if the Host Driver will be performing a full adapter initialization.
3217 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3222 vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3223 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3224 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3225 1, &vpdrev_param, vers);
3232 * t4_get_scfg_version - return the Serial Configuration version
3233 * @adapter: the adapter
3234 * @vers: where to place the version
3236 * Reads the Serial Configuration Version via the Firmware interface
3237 * (thus this can only be called once we're ready to issue Firmware
3238 * commands). The format of the Serial Configuration version is
3239 * adapter specific. Returns 0 on success, an error on failure.
3241 * Note that early versions of the Firmware didn't include the ability
3242 * to retrieve the Serial Configuration version, so we zero-out the
3243 * return-value parameter in that case to avoid leaving it with
3246 * Also note that the Firmware will return its cached copy of the Serial
3247 * Initialization Revision ID, not the actual Revision ID as written in
3248 * the Serial EEPROM. This is only an issue if a new VPD has been written
3249 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3250 * it's best to defer calling this routine till after a FW_RESET_CMD has
3251 * been issued if the Host Driver will be performing a full adapter
3254 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3259 scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3260 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3261 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3262 1, &scfgrev_param, vers);
3269 * t4_get_version_info - extract various chip/firmware version information
3270 * @adapter: the adapter
3272 * Reads various chip/firmware version numbers and stores them into the
3273 * adapter Adapter Parameters structure. If any of the efforts fails
3274 * the first failure will be returned, but all of the version numbers
3277 int t4_get_version_info(struct adapter *adapter)
3281 #define FIRST_RET(__getvinfo) \
3283 int __ret = __getvinfo; \
3284 if (__ret && !ret) \
3288 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3289 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3290 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3291 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3292 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3293 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3300 * t4_dump_version_info - dump all of the adapter configuration IDs
3301 * @adapter: the adapter
3303 * Dumps all of the various bits of adapter configuration version/revision
3304 * IDs information. This is typically called at some point after
3305 * t4_get_version_info() has been called.
3307 void t4_dump_version_info(struct adapter *adapter)
3309 /* Device information */
3310 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
3311 adapter->params.vpd.id,
3312 CHELSIO_CHIP_RELEASE(adapter->params.chip));
3313 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
3314 adapter->params.vpd.sn, adapter->params.vpd.pn);
3316 /* Firmware Version */
3317 if (!adapter->params.fw_vers)
3318 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
3320 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
3321 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
3322 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
3323 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
3324 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
3326 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
3327 * Firmware, so dev_info() is more appropriate here.)
3329 if (!adapter->params.bs_vers)
3330 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
3332 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
3333 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
3334 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
3335 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
3336 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
3338 /* TP Microcode Version */
3339 if (!adapter->params.tp_vers)
3340 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
3342 dev_info(adapter->pdev_dev,
3343 "TP Microcode version: %u.%u.%u.%u\n",
3344 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
3345 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
3346 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
3347 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
3349 /* Expansion ROM version */
3350 if (!adapter->params.er_vers)
3351 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
3353 dev_info(adapter->pdev_dev,
3354 "Expansion ROM version: %u.%u.%u.%u\n",
3355 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
3356 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
3357 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
3358 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
3360 /* Serial Configuration version */
3361 dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
3362 adapter->params.scfg_vers);
3365 dev_info(adapter->pdev_dev, "VPD version: %#x\n",
3366 adapter->params.vpd_vers);
3370 * t4_check_fw_version - check if the FW is supported with this driver
3371 * @adap: the adapter
3373 * Checks if an adapter's FW is compatible with the driver. Returns 0
3374 * if there's exact match, a negative error if the version could not be
3375 * read or there's a major version mismatch
3377 int t4_check_fw_version(struct adapter *adap)
3379 int i, ret, major, minor, micro;
3380 int exp_major, exp_minor, exp_micro;
3381 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3383 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3384 /* Try multiple times before returning error */
3385 for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3386 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3391 major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3392 minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3393 micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
3395 switch (chip_version) {
3397 exp_major = T4FW_MIN_VERSION_MAJOR;
3398 exp_minor = T4FW_MIN_VERSION_MINOR;
3399 exp_micro = T4FW_MIN_VERSION_MICRO;
3402 exp_major = T5FW_MIN_VERSION_MAJOR;
3403 exp_minor = T5FW_MIN_VERSION_MINOR;
3404 exp_micro = T5FW_MIN_VERSION_MICRO;
3407 exp_major = T6FW_MIN_VERSION_MAJOR;
3408 exp_minor = T6FW_MIN_VERSION_MINOR;
3409 exp_micro = T6FW_MIN_VERSION_MICRO;
3412 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
3417 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3418 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3419 dev_err(adap->pdev_dev,
3420 "Card has firmware version %u.%u.%u, minimum "
3421 "supported firmware is %u.%u.%u.\n", major, minor,
3422 micro, exp_major, exp_minor, exp_micro);
3428 /* Is the given firmware API compatible with the one the driver was compiled
3431 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3434 /* short circuit if it's the exact same firmware version */
3435 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3438 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3439 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3440 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3447 /* The firmware in the filesystem is usable, but should it be installed?
3448 * This routine explains itself in detail if it indicates the filesystem
3449 * firmware should be installed.
3451 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3456 if (!card_fw_usable) {
3457 reason = "incompatible or unusable";
3462 reason = "older than the version supported with this driver";
3469 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3470 "installing firmware %u.%u.%u.%u on card.\n",
3471 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3472 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3473 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3474 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3479 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3480 const u8 *fw_data, unsigned int fw_size,
3481 struct fw_hdr *card_fw, enum dev_state state,
3484 int ret, card_fw_usable, fs_fw_usable;
3485 const struct fw_hdr *fs_fw;
3486 const struct fw_hdr *drv_fw;
3488 drv_fw = &fw_info->fw_hdr;
3490 /* Read the header of the firmware on the card */
3491 ret = t4_read_flash(adap, FLASH_FW_START,
3492 sizeof(*card_fw) / sizeof(uint32_t),
3493 (uint32_t *)card_fw, 1);
3495 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
3497 dev_err(adap->pdev_dev,
3498 "Unable to read card's firmware header: %d\n", ret);
3502 if (fw_data != NULL) {
3503 fs_fw = (const void *)fw_data;
3504 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
3510 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3511 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
3512 /* Common case: the firmware on the card is an exact match and
3513 * the filesystem one is an exact match too, or the filesystem
3514 * one is absent/incompatible.
3516 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
3517 should_install_fs_fw(adap, card_fw_usable,
3518 be32_to_cpu(fs_fw->fw_ver),
3519 be32_to_cpu(card_fw->fw_ver))) {
3520 ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
3523 dev_err(adap->pdev_dev,
3524 "failed to install firmware: %d\n", ret);
3528 /* Installed successfully, update the cached header too. */
3531 *reset = 0; /* already reset as part of load_fw */
3534 if (!card_fw_usable) {
3537 d = be32_to_cpu(drv_fw->fw_ver);
3538 c = be32_to_cpu(card_fw->fw_ver);
3539 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
3541 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
3543 "driver compiled with %d.%d.%d.%d, "
3544 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
3546 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
3547 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
3548 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3549 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3550 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3551 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3556 /* We're using whatever's on the card and it's known to be good. */
3557 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
3558 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
3565 * t4_flash_erase_sectors - erase a range of flash sectors
3566 * @adapter: the adapter
3567 * @start: the first sector to erase
3568 * @end: the last sector to erase
3570 * Erases the sectors in the given inclusive range.
3572 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3576 if (end >= adapter->params.sf_nsec)
3579 while (start <= end) {
3580 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3581 (ret = sf1_write(adapter, 4, 0, 1,
3582 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3583 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3584 dev_err(adapter->pdev_dev,
3585 "erase of flash sector %d failed, error %d\n",
3591 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3596 * t4_flash_cfg_addr - return the address of the flash configuration file
3597 * @adapter: the adapter
3599 * Return the address within the flash where the Firmware Configuration
3602 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3604 if (adapter->params.sf_size == 0x100000)
3605 return FLASH_FPGA_CFG_START;
3607 return FLASH_CFG_START;
3610 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
3611 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3612 * and emit an error message for mismatched firmware to save our caller the
3615 static bool t4_fw_matches_chip(const struct adapter *adap,
3616 const struct fw_hdr *hdr)
3618 /* The expression below will return FALSE for any unsupported adapter
3619 * which will keep us "honest" in the future ...
3621 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3622 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3623 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
3626 dev_err(adap->pdev_dev,
3627 "FW image (%d) is not suitable for this adapter (%d)\n",
3628 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3633 * t4_load_fw - download firmware
3634 * @adap: the adapter
3635 * @fw_data: the firmware image to write
3638 * Write the supplied firmware image to the card's serial flash.
3640 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3645 u8 first_page[SF_PAGE_SIZE];
3646 const __be32 *p = (const __be32 *)fw_data;
3647 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3648 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3649 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3650 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3651 unsigned int fw_start = FLASH_FW_START;
3654 dev_err(adap->pdev_dev, "FW image has no data\n");
3658 dev_err(adap->pdev_dev,
3659 "FW image size not multiple of 512 bytes\n");
3662 if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
3663 dev_err(adap->pdev_dev,
3664 "FW image size differs from size in FW header\n");
3667 if (size > fw_size) {
3668 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3672 if (!t4_fw_matches_chip(adap, hdr))
3675 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3676 csum += be32_to_cpu(p[i]);
3678 if (csum != 0xffffffff) {
3679 dev_err(adap->pdev_dev,
3680 "corrupted firmware image, checksum %#x\n", csum);
3684 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3685 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3690 * We write the correct version at the end so the driver can see a bad
3691 * version if the FW write fails. Start by writing a copy of the
3692 * first page with a bad version.
3694 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3695 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3696 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
3701 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3702 addr += SF_PAGE_SIZE;
3703 fw_data += SF_PAGE_SIZE;
3704 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
3709 ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
3710 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
3714 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
3717 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3722 * t4_phy_fw_ver - return current PHY firmware version
3723 * @adap: the adapter
3724 * @phy_fw_ver: return value buffer for PHY firmware version
3726 * Returns the current version of external PHY firmware on the
3729 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3734 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3735 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3736 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3737 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3738 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3747 * t4_load_phy_fw - download port PHY firmware
3748 * @adap: the adapter
3749 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
3750 * @phy_fw_version: function to check PHY firmware versions
3751 * @phy_fw_data: the PHY firmware image to write
3752 * @phy_fw_size: image size
3754 * Transfer the specified PHY firmware to the adapter. If a non-NULL
3755 * @phy_fw_version is supplied, then it will be used to determine if
3756 * it's necessary to perform the transfer by comparing the version
3757 * of any existing adapter PHY firmware with that of the passed in
3758 * PHY firmware image.
3760 * A negative error number will be returned if an error occurs. If
3761 * version number support is available and there's no need to upgrade
3762 * the firmware, 0 will be returned. If firmware is successfully
3763 * transferred to the adapter, 1 will be returned.
3765 * NOTE: some adapters only have local RAM to store the PHY firmware. As
3766 * a result, a RESET of the adapter would cause that RAM to lose its
3767 * contents. Thus, loading PHY firmware on such adapters must happen
3768 * after any FW_RESET_CMDs ...
3770 int t4_load_phy_fw(struct adapter *adap, int win,
3771 int (*phy_fw_version)(const u8 *, size_t),
3772 const u8 *phy_fw_data, size_t phy_fw_size)
3774 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3775 unsigned long mtype = 0, maddr = 0;
3779 /* If we have version number support, then check to see if the adapter
3780 * already has up-to-date PHY firmware loaded.
3782 if (phy_fw_version) {
3783 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3784 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3788 if (cur_phy_fw_ver >= new_phy_fw_vers) {
3789 CH_WARN(adap, "PHY Firmware already up-to-date, "
3790 "version %#x\n", cur_phy_fw_ver);
3795 /* Ask the firmware where it wants us to copy the PHY firmware image.
3796 * The size of the file requires a special version of the READ command
3797 * which will pass the file size via the values field in PARAMS_CMD and
3798 * retrieve the return value from firmware and place it in the same
3801 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3802 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3803 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3804 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3806 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
3807 ¶m, &val, 1, true);
3811 maddr = (val & 0xff) << 16;
3813 /* Copy the supplied PHY Firmware image to the adapter memory location
3814 * allocated by the adapter firmware.
3816 spin_lock_bh(&adap->win0_lock);
3817 ret = t4_memory_rw(adap, win, mtype, maddr,
3818 phy_fw_size, (__be32 *)phy_fw_data,
3820 spin_unlock_bh(&adap->win0_lock);
3824 /* Tell the firmware that the PHY firmware image has been written to
3825 * RAM and it can now start copying it over to the PHYs. The chip
3826 * firmware will RESET the affected PHYs as part of this operation
3827 * leaving them running the new PHY firmware image.
3829 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3830 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3831 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3832 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3833 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
3834 ¶m, &val, 30000);
3836 /* If we have version number support, then check to see that the new
3837 * firmware got loaded properly.
3839 if (phy_fw_version) {
3840 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3844 if (cur_phy_fw_ver != new_phy_fw_vers) {
3845 CH_WARN(adap, "PHY Firmware did not update: "
3846 "version on adapter %#x, "
3847 "version flashed %#x\n",
3848 cur_phy_fw_ver, new_phy_fw_vers);
3857 * t4_fwcache - firmware cache operation
3858 * @adap: the adapter
3859 * @op : the operation (flush or flush and invalidate)
3861 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3863 struct fw_params_cmd c;
3865 memset(&c, 0, sizeof(c));
3867 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3868 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3869 FW_PARAMS_CMD_PFN_V(adap->pf) |
3870 FW_PARAMS_CMD_VFN_V(0));
3871 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3873 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3874 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3875 c.param[0].val = cpu_to_be32(op);
3877 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3880 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3881 unsigned int *pif_req_wrptr,
3882 unsigned int *pif_rsp_wrptr)
3885 u32 cfg, val, req, rsp;
3887 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3888 if (cfg & LADBGEN_F)
3889 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3891 val = t4_read_reg(adap, CIM_DEBUGSTS_A);
3892 req = POLADBGWRPTR_G(val);
3893 rsp = PILADBGWRPTR_G(val);
3895 *pif_req_wrptr = req;
3897 *pif_rsp_wrptr = rsp;
3899 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3900 for (j = 0; j < 6; j++) {
3901 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
3902 PILADBGRDPTR_V(rsp));
3903 *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
3904 *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
3908 req = (req + 2) & POLADBGRDPTR_M;
3909 rsp = (rsp + 2) & PILADBGRDPTR_M;
3911 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3914 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3919 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3920 if (cfg & LADBGEN_F)
3921 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3923 for (i = 0; i < CIM_MALA_SIZE; i++) {
3924 for (j = 0; j < 5; j++) {
3926 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
3927 PILADBGRDPTR_V(idx));
3928 *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
3929 *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
3932 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3935 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3939 for (i = 0; i < 8; i++) {
3940 u32 *p = la_buf + i;
3942 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
3943 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3944 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3945 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3946 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
3950 /* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
3951 * Capabilities which we control with separate controls -- see, for instance,
3952 * Pause Frames and Forward Error Correction. In order to determine what the
3953 * full set of Advertised Port Capabilities are, the base Advertised Port
3954 * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
3955 * Port Capabilities associated with those other controls. See
3956 * t4_link_acaps() for how this is done.
3958 #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
3962 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3963 * @caps16: a 16-bit Port Capabilities value
3965 * Returns the equivalent 32-bit Port Capabilities value.
3967 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3969 fw_port_cap32_t caps32 = 0;
3971 #define CAP16_TO_CAP32(__cap) \
3973 if (caps16 & FW_PORT_CAP_##__cap) \
3974 caps32 |= FW_PORT_CAP32_##__cap; \
3977 CAP16_TO_CAP32(SPEED_100M);
3978 CAP16_TO_CAP32(SPEED_1G);
3979 CAP16_TO_CAP32(SPEED_25G);
3980 CAP16_TO_CAP32(SPEED_10G);
3981 CAP16_TO_CAP32(SPEED_40G);
3982 CAP16_TO_CAP32(SPEED_100G);
3983 CAP16_TO_CAP32(FC_RX);
3984 CAP16_TO_CAP32(FC_TX);
3985 CAP16_TO_CAP32(ANEG);
3986 CAP16_TO_CAP32(FORCE_PAUSE);
3987 CAP16_TO_CAP32(MDIAUTO);
3988 CAP16_TO_CAP32(MDISTRAIGHT);
3989 CAP16_TO_CAP32(FEC_RS);
3990 CAP16_TO_CAP32(FEC_BASER_RS);
3991 CAP16_TO_CAP32(802_3_PAUSE);
3992 CAP16_TO_CAP32(802_3_ASM_DIR);
3994 #undef CAP16_TO_CAP32
4000 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
4001 * @caps32: a 32-bit Port Capabilities value
4003 * Returns the equivalent 16-bit Port Capabilities value. Note that
4004 * not all 32-bit Port Capabilities can be represented in the 16-bit
4005 * Port Capabilities and some fields/values may not make it.
4007 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
4009 fw_port_cap16_t caps16 = 0;
4011 #define CAP32_TO_CAP16(__cap) \
4013 if (caps32 & FW_PORT_CAP32_##__cap) \
4014 caps16 |= FW_PORT_CAP_##__cap; \
4017 CAP32_TO_CAP16(SPEED_100M);
4018 CAP32_TO_CAP16(SPEED_1G);
4019 CAP32_TO_CAP16(SPEED_10G);
4020 CAP32_TO_CAP16(SPEED_25G);
4021 CAP32_TO_CAP16(SPEED_40G);
4022 CAP32_TO_CAP16(SPEED_100G);
4023 CAP32_TO_CAP16(FC_RX);
4024 CAP32_TO_CAP16(FC_TX);
4025 CAP32_TO_CAP16(802_3_PAUSE);
4026 CAP32_TO_CAP16(802_3_ASM_DIR);
4027 CAP32_TO_CAP16(ANEG);
4028 CAP32_TO_CAP16(FORCE_PAUSE);
4029 CAP32_TO_CAP16(MDIAUTO);
4030 CAP32_TO_CAP16(MDISTRAIGHT);
4031 CAP32_TO_CAP16(FEC_RS);
4032 CAP32_TO_CAP16(FEC_BASER_RS);
4034 #undef CAP32_TO_CAP16
4039 /* Translate Firmware Port Capabilities Pause specification to Common Code */
4040 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
4042 enum cc_pause cc_pause = 0;
4044 if (fw_pause & FW_PORT_CAP32_FC_RX)
4045 cc_pause |= PAUSE_RX;
4046 if (fw_pause & FW_PORT_CAP32_FC_TX)
4047 cc_pause |= PAUSE_TX;
4052 /* Translate Common Code Pause specification into Firmware Port Capabilities */
4053 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
4055 /* Translate orthogonal RX/TX Pause Controls for L1 Configure
4058 fw_port_cap32_t fw_pause = 0;
4060 if (cc_pause & PAUSE_RX)
4061 fw_pause |= FW_PORT_CAP32_FC_RX;
4062 if (cc_pause & PAUSE_TX)
4063 fw_pause |= FW_PORT_CAP32_FC_TX;
4064 if (!(cc_pause & PAUSE_AUTONEG))
4065 fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
4067 /* Translate orthogonal Pause controls into IEEE 802.3 Pause,
4068 * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
4069 * Note that these bits are ignored in L1 Configure commands.
4071 if (cc_pause & PAUSE_RX) {
4072 if (cc_pause & PAUSE_TX)
4073 fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
4075 fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
4076 FW_PORT_CAP32_802_3_PAUSE;
4077 } else if (cc_pause & PAUSE_TX) {
4078 fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
4084 /* Translate Firmware Forward Error Correction specification to Common Code */
4085 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
4087 enum cc_fec cc_fec = 0;
4089 if (fw_fec & FW_PORT_CAP32_FEC_RS)
4091 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
4092 cc_fec |= FEC_BASER_RS;
4097 /* Translate Common Code Forward Error Correction specification to Firmware */
4098 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
4100 fw_port_cap32_t fw_fec = 0;
4102 if (cc_fec & FEC_RS)
4103 fw_fec |= FW_PORT_CAP32_FEC_RS;
4104 if (cc_fec & FEC_BASER_RS)
4105 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
4111 * t4_link_acaps - compute Link Advertised Port Capabilities
4112 * @adapter: the adapter
4113 * @port: the Port ID
4114 * @lc: the Port's Link Configuration
4116 * Synthesize the Advertised Port Capabilities we'll be using based on
4117 * the base Advertised Port Capabilities (which have been filtered by
4118 * ADVERT_MASK) plus the individual controls for things like Pause
4119 * Frames, Forward Error Correction, MDI, etc.
4121 fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
4122 struct link_config *lc)
4124 fw_port_cap32_t fw_fc, fw_fec, acaps;
4125 unsigned int fw_mdi;
4128 fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
4130 /* Convert driver coding of Pause Frame Flow Control settings into the
4133 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4135 /* Convert Common Code Forward Error Control settings into the
4136 * Firmware's API. If the current Requested FEC has "Automatic"
4137 * (IEEE 802.3) specified, then we use whatever the Firmware
4138 * sent us as part of its IEEE 802.3-based interpretation of
4139 * the Transceiver Module EPROM FEC parameters. Otherwise we
4140 * use whatever is in the current Requested FEC settings.
4142 if (lc->requested_fec & FEC_AUTO)
4143 cc_fec = fwcap_to_cc_fec(lc->def_acaps);
4145 cc_fec = lc->requested_fec;
4146 fw_fec = cc_to_fwcap_fec(cc_fec);
4148 /* Figure out what our Requested Port Capabilities are going to be.
4149 * Note parallel structure in t4_handle_get_port_info() and
4150 * init_link_config().
4152 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4153 acaps = lc->acaps | fw_fc | fw_fec;
4154 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4156 } else if (lc->autoneg == AUTONEG_DISABLE) {
4157 acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
4158 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4161 acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
4164 /* Some Requested Port Capabilities are trivially wrong if they exceed
4165 * the Physical Port Capabilities. We can check that here and provide
4166 * moderately useful feedback in the system log.
4168 * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
4169 * we need to exclude this from this check in order to maintain
4172 if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
4173 dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
4182 * t4_link_l1cfg_core - apply link configuration to MAC/PHY
4183 * @adapter: the adapter
4184 * @mbox: the Firmware Mailbox to use
4185 * @port: the Port ID
4186 * @lc: the Port's Link Configuration
4187 * @sleep_ok: if true we may sleep while awaiting command completion
4188 * @timeout: time to wait for command to finish before timing out
4189 * (negative implies @sleep_ok=false)
4191 * Set up a port's MAC and PHY according to a desired link configuration.
4192 * - If the PHY can auto-negotiate first decide what to advertise, then
4193 * enable/disable auto-negotiation as desired, and reset.
4194 * - If the PHY does not auto-negotiate just reset it.
4195 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4196 * otherwise do it later based on the outcome of auto-negotiation.
4198 int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
4199 unsigned int port, struct link_config *lc,
4200 u8 sleep_ok, int timeout)
4202 unsigned int fw_caps = adapter->params.fw_caps_support;
4203 struct fw_port_cmd cmd;
4204 fw_port_cap32_t rcap;
4207 if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
4208 lc->autoneg == AUTONEG_ENABLE) {
4212 /* Compute our Requested Port Capabilities and send that on to the
4215 rcap = t4_link_acaps(adapter, port, lc);
4216 memset(&cmd, 0, sizeof(cmd));
4217 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4218 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4219 FW_PORT_CMD_PORTID_V(port));
4220 cmd.action_to_len16 =
4221 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4222 ? FW_PORT_ACTION_L1_CFG
4223 : FW_PORT_ACTION_L1_CFG32) |
4225 if (fw_caps == FW_CAPS16)
4226 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
4228 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
4230 ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
4233 /* Unfortunately, even if the Requested Port Capabilities "fit" within
4234 * the Physical Port Capabilities, some combinations of features may
4235 * still not be legal. For example, 40Gb/s and Reed-Solomon Forward
4236 * Error Correction. So if the Firmware rejects the L1 Configure
4237 * request, flag that here.
4240 dev_err(adapter->pdev_dev,
4241 "Requested Port Capabilities %#x rejected, error %d\n",
4249 * t4_restart_aneg - restart autonegotiation
4250 * @adap: the adapter
4251 * @mbox: mbox to use for the FW command
4252 * @port: the port id
4254 * Restarts autonegotiation for the selected port.
4256 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4258 unsigned int fw_caps = adap->params.fw_caps_support;
4259 struct fw_port_cmd c;
4261 memset(&c, 0, sizeof(c));
4262 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4263 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4264 FW_PORT_CMD_PORTID_V(port));
4266 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4267 ? FW_PORT_ACTION_L1_CFG
4268 : FW_PORT_ACTION_L1_CFG32) |
4270 if (fw_caps == FW_CAPS16)
4271 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4273 c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
4274 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4277 typedef void (*int_handler_t)(struct adapter *adap);
4280 unsigned int mask; /* bits to check in interrupt status */
4281 const char *msg; /* message to print or NULL */
4282 short stat_idx; /* stat counter to increment or -1 */
4283 unsigned short fatal; /* whether the condition reported is fatal */
4284 int_handler_t int_handler; /* platform-specific int handler */
4288 * t4_handle_intr_status - table driven interrupt handler
4289 * @adapter: the adapter that generated the interrupt
4290 * @reg: the interrupt status register to process
4291 * @acts: table of interrupt actions
4293 * A table driven interrupt handler that applies a set of masks to an
4294 * interrupt status word and performs the corresponding actions if the
4295 * interrupts described by the mask have occurred. The actions include
4296 * optionally emitting a warning or alert message. The table is terminated
4297 * by an entry specifying mask 0. Returns the number of fatal interrupt
4300 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4301 const struct intr_info *acts)
4304 unsigned int mask = 0;
4305 unsigned int status = t4_read_reg(adapter, reg);
4307 for ( ; acts->mask; ++acts) {
4308 if (!(status & acts->mask))
4312 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4313 status & acts->mask);
4314 } else if (acts->msg && printk_ratelimit())
4315 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4316 status & acts->mask);
4317 if (acts->int_handler)
4318 acts->int_handler(adapter);
4322 if (status) /* clear processed interrupts */
4323 t4_write_reg(adapter, reg, status);
4328 * Interrupt handler for the PCIE module.
4330 static void pcie_intr_handler(struct adapter *adapter)
4332 static const struct intr_info sysbus_intr_info[] = {
4333 { RNPP_F, "RXNP array parity error", -1, 1 },
4334 { RPCP_F, "RXPC array parity error", -1, 1 },
4335 { RCIP_F, "RXCIF array parity error", -1, 1 },
4336 { RCCP_F, "Rx completions control array parity error", -1, 1 },
4337 { RFTP_F, "RXFT array parity error", -1, 1 },
4340 static const struct intr_info pcie_port_intr_info[] = {
4341 { TPCP_F, "TXPC array parity error", -1, 1 },
4342 { TNPP_F, "TXNP array parity error", -1, 1 },
4343 { TFTP_F, "TXFT array parity error", -1, 1 },
4344 { TCAP_F, "TXCA array parity error", -1, 1 },
4345 { TCIP_F, "TXCIF array parity error", -1, 1 },
4346 { RCAP_F, "RXCA array parity error", -1, 1 },
4347 { OTDD_F, "outbound request TLP discarded", -1, 1 },
4348 { RDPE_F, "Rx data parity error", -1, 1 },
4349 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
4352 static const struct intr_info pcie_intr_info[] = {
4353 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4354 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4355 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4356 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4357 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4358 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4359 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4360 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4361 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4362 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4363 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4364 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4365 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4366 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4367 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4368 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4369 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4370 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4371 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4372 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4373 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4374 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4375 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4376 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4377 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4378 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4379 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4380 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
4381 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
4382 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
4387 static struct intr_info t5_pcie_intr_info[] = {
4388 { MSTGRPPERR_F, "Master Response Read Queue parity error",
4390 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4391 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4392 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4393 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4394 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4395 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4396 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
4398 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
4400 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4401 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4402 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4403 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4404 { DREQWRPERR_F, "PCI DMA channel write request parity error",
4406 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4407 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4408 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4409 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4410 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4411 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4412 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4413 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4414 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4415 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4416 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
4418 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4420 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4421 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4422 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4423 { READRSPERR_F, "Outbound read error", -1, 0 },
4429 if (is_t4(adapter->params.chip))
4430 fat = t4_handle_intr_status(adapter,
4431 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4433 t4_handle_intr_status(adapter,
4434 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4435 pcie_port_intr_info) +
4436 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4439 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4443 t4_fatal_err(adapter);
4447 * TP interrupt handler.
4449 static void tp_intr_handler(struct adapter *adapter)
4451 static const struct intr_info tp_intr_info[] = {
4452 { 0x3fffffff, "TP parity error", -1, 1 },
4453 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
4457 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
4458 t4_fatal_err(adapter);
4462 * SGE interrupt handler.
4464 static void sge_intr_handler(struct adapter *adapter)
4469 static const struct intr_info sge_intr_info[] = {
4470 { ERR_CPL_EXCEED_IQE_SIZE_F,
4471 "SGE received CPL exceeding IQE size", -1, 1 },
4472 { ERR_INVALID_CIDX_INC_F,
4473 "SGE GTS CIDX increment too large", -1, 0 },
4474 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4475 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
4476 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
4477 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4478 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
4480 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
4482 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
4484 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
4486 { ERR_ING_CTXT_PRIO_F,
4487 "SGE too many priority ingress contexts", -1, 0 },
4488 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4489 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
4493 static struct intr_info t4t5_sge_intr_info[] = {
4494 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4495 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4496 { ERR_EGR_CTXT_PRIO_F,
4497 "SGE too many priority egress contexts", -1, 0 },
4501 perr = t4_read_reg(adapter, SGE_INT_CAUSE1_A);
4504 dev_alert(adapter->pdev_dev, "SGE Cause1 Parity Error %#x\n",
4508 perr = t4_read_reg(adapter, SGE_INT_CAUSE2_A);
4511 dev_alert(adapter->pdev_dev, "SGE Cause2 Parity Error %#x\n",
4515 if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
4516 perr = t4_read_reg(adapter, SGE_INT_CAUSE5_A);
4517 /* Parity error (CRC) for err_T_RxCRC is trivial, ignore it */
4518 perr &= ~ERR_T_RXCRC_F;
4521 dev_alert(adapter->pdev_dev,
4522 "SGE Cause5 Parity Error %#x\n", perr);
4526 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4527 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4528 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4529 t4t5_sge_intr_info);
4531 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4532 if (err & ERROR_QID_VALID_F) {
4533 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4535 if (err & UNCAPTURED_ERROR_F)
4536 dev_err(adapter->pdev_dev,
4537 "SGE UNCAPTURED_ERROR set (clearing)\n");
4538 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4539 UNCAPTURED_ERROR_F);
4543 t4_fatal_err(adapter);
4546 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
4547 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
4548 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
4549 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4552 * CIM interrupt handler.
4554 static void cim_intr_handler(struct adapter *adapter)
4556 static const struct intr_info cim_intr_info[] = {
4557 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
4558 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4559 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4560 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
4561 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
4562 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
4563 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
4564 { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
4567 static const struct intr_info cim_upintr_info[] = {
4568 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
4569 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
4570 { ILLWRINT_F, "CIM illegal write", -1, 1 },
4571 { ILLRDINT_F, "CIM illegal read", -1, 1 },
4572 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
4573 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
4574 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
4575 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
4576 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
4577 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
4578 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
4579 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
4580 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
4581 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
4582 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
4583 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
4584 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
4585 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
4586 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
4587 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
4588 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
4589 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
4590 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
4591 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
4592 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
4593 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
4594 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
4595 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
4602 fw_err = t4_read_reg(adapter, PCIE_FW_A);
4603 if (fw_err & PCIE_FW_ERR_F)
4604 t4_report_fw_error(adapter);
4606 /* When the Firmware detects an internal error which normally
4607 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
4608 * in order to make sure the Host sees the Firmware Crash. So
4609 * if we have a Timer0 interrupt and don't see a Firmware Crash,
4610 * ignore the Timer0 interrupt.
4613 val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
4614 if (val & TIMER0INT_F)
4615 if (!(fw_err & PCIE_FW_ERR_F) ||
4616 (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
4617 t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
4620 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
4622 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
4625 t4_fatal_err(adapter);
4629 * ULP RX interrupt handler.
4631 static void ulprx_intr_handler(struct adapter *adapter)
4633 static const struct intr_info ulprx_intr_info[] = {
4634 { 0x1800000, "ULPRX context error", -1, 1 },
4635 { 0x7fffff, "ULPRX parity error", -1, 1 },
4639 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4640 t4_fatal_err(adapter);
4644 * ULP TX interrupt handler.
4646 static void ulptx_intr_handler(struct adapter *adapter)
4648 static const struct intr_info ulptx_intr_info[] = {
4649 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4651 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4653 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4655 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4657 { 0xfffffff, "ULPTX parity error", -1, 1 },
4661 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4662 t4_fatal_err(adapter);
4666 * PM TX interrupt handler.
4668 static void pmtx_intr_handler(struct adapter *adapter)
4670 static const struct intr_info pmtx_intr_info[] = {
4671 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4672 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4673 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4674 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4675 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4676 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4677 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4679 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4680 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4684 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4685 t4_fatal_err(adapter);
4689 * PM RX interrupt handler.
4691 static void pmrx_intr_handler(struct adapter *adapter)
4693 static const struct intr_info pmrx_intr_info[] = {
4694 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4695 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4696 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4697 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4699 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4700 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4704 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4705 t4_fatal_err(adapter);
4709 * CPL switch interrupt handler.
4711 static void cplsw_intr_handler(struct adapter *adapter)
4713 static const struct intr_info cplsw_intr_info[] = {
4714 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4715 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4716 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4717 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4718 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4719 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4723 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4724 t4_fatal_err(adapter);
4728 * LE interrupt handler.
4730 static void le_intr_handler(struct adapter *adap)
4732 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4733 static const struct intr_info le_intr_info[] = {
4734 { LIPMISS_F, "LE LIP miss", -1, 0 },
4735 { LIP0_F, "LE 0 LIP error", -1, 0 },
4736 { PARITYERR_F, "LE parity error", -1, 1 },
4737 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4738 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
4742 static struct intr_info t6_le_intr_info[] = {
4743 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4744 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4745 { CMDTIDERR_F, "LE cmd tid error", -1, 1 },
4746 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4747 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4748 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4749 { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
4753 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4754 (chip <= CHELSIO_T5) ?
4755 le_intr_info : t6_le_intr_info))
4760 * MPS interrupt handler.
4762 static void mps_intr_handler(struct adapter *adapter)
4764 static const struct intr_info mps_rx_intr_info[] = {
4765 { 0xffffff, "MPS Rx parity error", -1, 1 },
4768 static const struct intr_info mps_tx_intr_info[] = {
4769 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4770 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4771 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4773 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4775 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4776 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4777 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4780 static const struct intr_info t6_mps_tx_intr_info[] = {
4781 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4782 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4783 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4785 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4787 /* MPS Tx Bubble is normal for T6 */
4788 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4789 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4792 static const struct intr_info mps_trc_intr_info[] = {
4793 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4794 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4796 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4799 static const struct intr_info mps_stat_sram_intr_info[] = {
4800 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4803 static const struct intr_info mps_stat_tx_intr_info[] = {
4804 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4807 static const struct intr_info mps_stat_rx_intr_info[] = {
4808 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4811 static const struct intr_info mps_cls_intr_info[] = {
4812 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4813 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4814 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
4820 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
4822 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
4823 is_t6(adapter->params.chip)
4824 ? t6_mps_tx_intr_info
4825 : mps_tx_intr_info) +
4826 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
4827 mps_trc_intr_info) +
4828 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
4829 mps_stat_sram_intr_info) +
4830 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
4831 mps_stat_tx_intr_info) +
4832 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
4833 mps_stat_rx_intr_info) +
4834 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
4837 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4838 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
4840 t4_fatal_err(adapter);
4843 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4847 * EDC/MC interrupt handler.
4849 static void mem_intr_handler(struct adapter *adapter, int idx)
4851 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4853 unsigned int addr, cnt_addr, v;
4855 if (idx <= MEM_EDC1) {
4856 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4857 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4858 } else if (idx == MEM_MC) {
4859 if (is_t4(adapter->params.chip)) {
4860 addr = MC_INT_CAUSE_A;
4861 cnt_addr = MC_ECC_STATUS_A;
4863 addr = MC_P_INT_CAUSE_A;
4864 cnt_addr = MC_P_ECC_STATUS_A;
4867 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4868 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4871 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4872 if (v & PERR_INT_CAUSE_F)
4873 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4875 if (v & ECC_CE_INT_CAUSE_F) {
4876 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
4878 t4_edc_err_read(adapter, idx);
4880 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4881 if (printk_ratelimit())
4882 dev_warn(adapter->pdev_dev,
4883 "%u %s correctable ECC data error%s\n",
4884 cnt, name[idx], cnt > 1 ? "s" : "");
4886 if (v & ECC_UE_INT_CAUSE_F)
4887 dev_alert(adapter->pdev_dev,
4888 "%s uncorrectable ECC data error\n", name[idx]);
4890 t4_write_reg(adapter, addr, v);
4891 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4892 t4_fatal_err(adapter);
4896 * MA interrupt handler.
4898 static void ma_intr_handler(struct adapter *adap)
4900 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4902 if (status & MEM_PERR_INT_CAUSE_F) {
4903 dev_alert(adap->pdev_dev,
4904 "MA parity error, parity status %#x\n",
4905 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
4906 if (is_t5(adap->params.chip))
4907 dev_alert(adap->pdev_dev,
4908 "MA parity error, parity status %#x\n",
4910 MA_PARITY_ERROR_STATUS2_A));
4912 if (status & MEM_WRAP_INT_CAUSE_F) {
4913 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4914 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4915 "client %u to address %#x\n",
4916 MEM_WRAP_CLIENT_NUM_G(v),
4917 MEM_WRAP_ADDRESS_G(v) << 4);
4919 t4_write_reg(adap, MA_INT_CAUSE_A, status);
4924 * SMB interrupt handler.
4926 static void smb_intr_handler(struct adapter *adap)
4928 static const struct intr_info smb_intr_info[] = {
4929 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4930 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4931 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4935 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4940 * NC-SI interrupt handler.
4942 static void ncsi_intr_handler(struct adapter *adap)
4944 static const struct intr_info ncsi_intr_info[] = {
4945 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4946 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4947 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4948 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4952 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4957 * XGMAC interrupt handler.
4959 static void xgmac_intr_handler(struct adapter *adap, int port)
4961 u32 v, int_cause_reg;
4963 if (is_t4(adap->params.chip))
4964 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4966 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4968 v = t4_read_reg(adap, int_cause_reg);
4970 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4974 if (v & TXFIFO_PRTY_ERR_F)
4975 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4977 if (v & RXFIFO_PRTY_ERR_F)
4978 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4980 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4985 * PL interrupt handler.
4987 static void pl_intr_handler(struct adapter *adap)
4989 static const struct intr_info pl_intr_info[] = {
4990 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4991 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4995 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
4999 #define PF_INTR_MASK (PFSW_F)
5000 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
5001 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
5002 CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
5005 * t4_slow_intr_handler - control path interrupt handler
5006 * @adapter: the adapter
5008 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
5009 * The designation 'slow' is because it involves register reads, while
5010 * data interrupts typically don't involve any MMIOs.
5012 int t4_slow_intr_handler(struct adapter *adapter)
5014 /* There are rare cases where a PL_INT_CAUSE bit may end up getting
5015 * set when the corresponding PL_INT_ENABLE bit isn't set. It's
5016 * easiest just to mask that case here.
5018 u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
5019 u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A);
5020 u32 cause = raw_cause & enable;
5022 if (!(cause & GLBL_INTR_MASK))
5025 cim_intr_handler(adapter);
5027 mps_intr_handler(adapter);
5029 ncsi_intr_handler(adapter);
5031 pl_intr_handler(adapter);
5033 smb_intr_handler(adapter);
5034 if (cause & XGMAC0_F)
5035 xgmac_intr_handler(adapter, 0);
5036 if (cause & XGMAC1_F)
5037 xgmac_intr_handler(adapter, 1);
5038 if (cause & XGMAC_KR0_F)
5039 xgmac_intr_handler(adapter, 2);
5040 if (cause & XGMAC_KR1_F)
5041 xgmac_intr_handler(adapter, 3);
5043 pcie_intr_handler(adapter);
5045 mem_intr_handler(adapter, MEM_MC);
5046 if (is_t5(adapter->params.chip) && (cause & MC1_F))
5047 mem_intr_handler(adapter, MEM_MC1);
5049 mem_intr_handler(adapter, MEM_EDC0);
5051 mem_intr_handler(adapter, MEM_EDC1);
5053 le_intr_handler(adapter);
5055 tp_intr_handler(adapter);
5057 ma_intr_handler(adapter);
5058 if (cause & PM_TX_F)
5059 pmtx_intr_handler(adapter);
5060 if (cause & PM_RX_F)
5061 pmrx_intr_handler(adapter);
5062 if (cause & ULP_RX_F)
5063 ulprx_intr_handler(adapter);
5064 if (cause & CPL_SWITCH_F)
5065 cplsw_intr_handler(adapter);
5067 sge_intr_handler(adapter);
5068 if (cause & ULP_TX_F)
5069 ulptx_intr_handler(adapter);
5071 /* Clear the interrupts just processed for which we are the master. */
5072 t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK);
5073 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
5078 * t4_intr_enable - enable interrupts
5079 * @adapter: the adapter whose interrupts should be enabled
5081 * Enable PF-specific interrupts for the calling function and the top-level
5082 * interrupt concentrator for global interrupts. Interrupts are already
5083 * enabled at each module, here we just enable the roots of the interrupt
5086 * Note: this function should be called only when the driver manages
5087 * non PF-specific interrupts from the various HW modules. Only one PCI
5088 * function at a time should be doing this.
5090 void t4_intr_enable(struct adapter *adapter)
5093 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5094 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5095 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5097 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5098 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
5099 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
5100 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
5101 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
5102 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
5103 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
5104 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
5105 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
5106 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
5107 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
5111 * t4_intr_disable - disable interrupts
5112 * @adapter: the adapter whose interrupts should be disabled
5114 * Disable interrupts. We only disable the top-level interrupt
5115 * concentrators. The caller must be a PCI function managing global
5118 void t4_intr_disable(struct adapter *adapter)
5122 if (pci_channel_offline(adapter->pdev))
5125 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5126 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5127 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5129 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
5130 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
5133 unsigned int t4_chip_rss_size(struct adapter *adap)
5135 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5136 return RSS_NENTRIES;
5138 return T6_RSS_NENTRIES;
5142 * t4_config_rss_range - configure a portion of the RSS mapping table
5143 * @adapter: the adapter
5144 * @mbox: mbox to use for the FW command
5145 * @viid: virtual interface whose RSS subtable is to be written
5146 * @start: start entry in the table to write
5147 * @n: how many table entries to write
5148 * @rspq: values for the response queue lookup table
5149 * @nrspq: number of values in @rspq
5151 * Programs the selected part of the VI's RSS mapping table with the
5152 * provided values. If @nrspq < @n the supplied values are used repeatedly
5153 * until the full table range is populated.
5155 * The caller must ensure the values in @rspq are in the range allowed for
5158 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5159 int start, int n, const u16 *rspq, unsigned int nrspq)
5162 const u16 *rsp = rspq;
5163 const u16 *rsp_end = rspq + nrspq;
5164 struct fw_rss_ind_tbl_cmd cmd;
5166 memset(&cmd, 0, sizeof(cmd));
5167 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
5168 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5169 FW_RSS_IND_TBL_CMD_VIID_V(viid));
5170 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5172 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
5174 int nq = min(n, 32);
5175 __be32 *qp = &cmd.iq0_to_iq2;
5177 cmd.niqid = cpu_to_be16(nq);
5178 cmd.startidx = cpu_to_be16(start);
5186 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
5187 if (++rsp >= rsp_end)
5189 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
5190 if (++rsp >= rsp_end)
5192 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
5193 if (++rsp >= rsp_end)
5196 *qp++ = cpu_to_be32(v);
5200 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5208 * t4_config_glbl_rss - configure the global RSS mode
5209 * @adapter: the adapter
5210 * @mbox: mbox to use for the FW command
5211 * @mode: global RSS mode
5212 * @flags: mode-specific flags
5214 * Sets the global RSS mode.
5216 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5219 struct fw_rss_glb_config_cmd c;
5221 memset(&c, 0, sizeof(c));
5222 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5223 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5224 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5225 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5226 c.u.manual.mode_pkd =
5227 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5228 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5229 c.u.basicvirtual.mode_pkd =
5230 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5231 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5234 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5238 * t4_config_vi_rss - configure per VI RSS settings
5239 * @adapter: the adapter
5240 * @mbox: mbox to use for the FW command
5243 * @defq: id of the default RSS queue for the VI.
5245 * Configures VI-specific RSS properties.
5247 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5248 unsigned int flags, unsigned int defq)
5250 struct fw_rss_vi_config_cmd c;
5252 memset(&c, 0, sizeof(c));
5253 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5254 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5255 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5256 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5257 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5258 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5259 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5262 /* Read an RSS table row */
5263 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5265 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
5266 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5271 * t4_read_rss - read the contents of the RSS mapping table
5272 * @adapter: the adapter
5273 * @map: holds the contents of the RSS mapping table
5275 * Reads the contents of the RSS hash->queue mapping table.
5277 int t4_read_rss(struct adapter *adapter, u16 *map)
5279 int i, ret, nentries;
5282 nentries = t4_chip_rss_size(adapter);
5283 for (i = 0; i < nentries / 2; ++i) {
5284 ret = rd_rss_row(adapter, i, &val);
5287 *map++ = LKPTBLQUEUE0_G(val);
5288 *map++ = LKPTBLQUEUE1_G(val);
5293 static unsigned int t4_use_ldst(struct adapter *adap)
5295 return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
5299 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5300 * @adap: the adapter
5301 * @cmd: TP fw ldst address space type
5302 * @vals: where the indirect register values are stored/written
5303 * @nregs: how many indirect registers to read/write
5304 * @start_index: index of first indirect register to read/write
5305 * @rw: Read (1) or Write (0)
5306 * @sleep_ok: if true we may sleep while awaiting command completion
5308 * Access TP indirect registers through LDST
5310 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5311 unsigned int nregs, unsigned int start_index,
5312 unsigned int rw, bool sleep_ok)
5316 struct fw_ldst_cmd c;
5318 for (i = 0; i < nregs; i++) {
5319 memset(&c, 0, sizeof(c));
5320 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5322 (rw ? FW_CMD_READ_F :
5324 FW_LDST_CMD_ADDRSPACE_V(cmd));
5325 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5327 c.u.addrval.addr = cpu_to_be32(start_index + i);
5328 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5329 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5335 vals[i] = be32_to_cpu(c.u.addrval.val);
5341 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5342 * @adap: the adapter
5343 * @reg_addr: Address Register
5344 * @reg_data: Data register
5345 * @buff: where the indirect register values are stored/written
5346 * @nregs: how many indirect registers to read/write
5347 * @start_index: index of first indirect register to read/write
5348 * @rw: READ(1) or WRITE(0)
5349 * @sleep_ok: if true we may sleep while awaiting command completion
5351 * Read/Write TP indirect registers through LDST if possible.
5352 * Else, use backdoor access
5354 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5355 u32 *buff, u32 nregs, u32 start_index, int rw,
5363 cmd = FW_LDST_ADDRSPC_TP_PIO;
5365 case TP_TM_PIO_ADDR_A:
5366 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5368 case TP_MIB_INDEX_A:
5369 cmd = FW_LDST_ADDRSPC_TP_MIB;
5372 goto indirect_access;
5375 if (t4_use_ldst(adap))
5376 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5383 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5386 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5392 * t4_tp_pio_read - Read TP PIO registers
5393 * @adap: the adapter
5394 * @buff: where the indirect register values are written
5395 * @nregs: how many indirect registers to read
5396 * @start_index: index of first indirect register to read
5397 * @sleep_ok: if true we may sleep while awaiting command completion
5399 * Read TP PIO Registers
5401 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5402 u32 start_index, bool sleep_ok)
5404 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5405 start_index, 1, sleep_ok);
5409 * t4_tp_pio_write - Write TP PIO registers
5410 * @adap: the adapter
5411 * @buff: where the indirect register values are stored
5412 * @nregs: how many indirect registers to write
5413 * @start_index: index of first indirect register to write
5414 * @sleep_ok: if true we may sleep while awaiting command completion
5416 * Write TP PIO Registers
5418 static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5419 u32 start_index, bool sleep_ok)
5421 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5422 start_index, 0, sleep_ok);
5426 * t4_tp_tm_pio_read - Read TP TM PIO registers
5427 * @adap: the adapter
5428 * @buff: where the indirect register values are written
5429 * @nregs: how many indirect registers to read
5430 * @start_index: index of first indirect register to read
5431 * @sleep_ok: if true we may sleep while awaiting command completion
5433 * Read TP TM PIO Registers
5435 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5436 u32 start_index, bool sleep_ok)
5438 t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5439 nregs, start_index, 1, sleep_ok);
5443 * t4_tp_mib_read - Read TP MIB registers
5444 * @adap: the adapter
5445 * @buff: where the indirect register values are written
5446 * @nregs: how many indirect registers to read
5447 * @start_index: index of first indirect register to read
5448 * @sleep_ok: if true we may sleep while awaiting command completion
5450 * Read TP MIB Registers
5452 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5455 t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5456 start_index, 1, sleep_ok);
5460 * t4_read_rss_key - read the global RSS key
5461 * @adap: the adapter
5462 * @key: 10-entry array holding the 320-bit RSS key
5463 * @sleep_ok: if true we may sleep while awaiting command completion
5465 * Reads the global 320-bit RSS key.
5467 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5469 t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5473 * t4_write_rss_key - program one of the RSS keys
5474 * @adap: the adapter
5475 * @key: 10-entry array holding the 320-bit RSS key
5476 * @idx: which RSS key to write
5477 * @sleep_ok: if true we may sleep while awaiting command completion
5479 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5480 * 0..15 the corresponding entry in the RSS key table is written,
5481 * otherwise the global RSS key is written.
5483 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5486 u8 rss_key_addr_cnt = 16;
5487 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5489 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5490 * allows access to key addresses 16-63 by using KeyWrAddrX
5491 * as index[5:4](upper 2) into key table
5493 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5494 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5495 rss_key_addr_cnt = 32;
5497 t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5499 if (idx >= 0 && idx < rss_key_addr_cnt) {
5500 if (rss_key_addr_cnt > 16)
5501 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5502 KEYWRADDRX_V(idx >> 4) |
5503 T6_VFWRADDR_V(idx) | KEYWREN_F);
5505 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5506 KEYWRADDR_V(idx) | KEYWREN_F);
5511 * t4_read_rss_pf_config - read PF RSS Configuration Table
5512 * @adapter: the adapter
5513 * @index: the entry in the PF RSS table to read
5514 * @valp: where to store the returned value
5515 * @sleep_ok: if true we may sleep while awaiting command completion
5517 * Reads the PF RSS Configuration Table at the specified index and returns
5518 * the value found there.
5520 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5521 u32 *valp, bool sleep_ok)
5523 t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
5527 * t4_read_rss_vf_config - read VF RSS Configuration Table
5528 * @adapter: the adapter
5529 * @index: the entry in the VF RSS table to read
5530 * @vfl: where to store the returned VFL
5531 * @vfh: where to store the returned VFH
5532 * @sleep_ok: if true we may sleep while awaiting command completion
5534 * Reads the VF RSS Configuration Table at the specified index and returns
5535 * the (VFL, VFH) values found there.
5537 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5538 u32 *vfl, u32 *vfh, bool sleep_ok)
5540 u32 vrt, mask, data;
5542 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5543 mask = VFWRADDR_V(VFWRADDR_M);
5544 data = VFWRADDR_V(index);
5546 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
5547 data = T6_VFWRADDR_V(index);
5550 /* Request that the index'th VF Table values be read into VFL/VFH.
5552 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5553 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5554 vrt |= data | VFRDEN_F;
5555 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5557 /* Grab the VFL/VFH values ...
5559 t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5560 t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5564 * t4_read_rss_pf_map - read PF RSS Map
5565 * @adapter: the adapter
5566 * @sleep_ok: if true we may sleep while awaiting command completion
5568 * Reads the PF RSS Map register and returns its value.
5570 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5574 t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5579 * t4_read_rss_pf_mask - read PF RSS Mask
5580 * @adapter: the adapter
5581 * @sleep_ok: if true we may sleep while awaiting command completion
5583 * Reads the PF RSS Mask register and returns its value.
5585 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5589 t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5594 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5595 * @adap: the adapter
5596 * @v4: holds the TCP/IP counter values
5597 * @v6: holds the TCP/IPv6 counter values
5598 * @sleep_ok: if true we may sleep while awaiting command completion
5600 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5601 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5603 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5604 struct tp_tcp_stats *v6, bool sleep_ok)
5606 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
5608 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
5609 #define STAT(x) val[STAT_IDX(x)]
5610 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5613 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5614 TP_MIB_TCP_OUT_RST_A, sleep_ok);
5615 v4->tcp_out_rsts = STAT(OUT_RST);
5616 v4->tcp_in_segs = STAT64(IN_SEG);
5617 v4->tcp_out_segs = STAT64(OUT_SEG);
5618 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5621 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5622 TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
5623 v6->tcp_out_rsts = STAT(OUT_RST);
5624 v6->tcp_in_segs = STAT64(IN_SEG);
5625 v6->tcp_out_segs = STAT64(OUT_SEG);
5626 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5634 * t4_tp_get_err_stats - read TP's error MIB counters
5635 * @adap: the adapter
5636 * @st: holds the counter values
5637 * @sleep_ok: if true we may sleep while awaiting command completion
5639 * Returns the values of TP's error counters.
5641 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5644 int nchan = adap->params.arch.nchan;
5646 t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5648 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5650 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5652 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5653 TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5654 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5655 TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5656 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5658 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5659 TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5660 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5661 TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
5662 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5667 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5668 * @adap: the adapter
5669 * @st: holds the counter values
5670 * @sleep_ok: if true we may sleep while awaiting command completion
5672 * Returns the values of TP's CPL counters.
5674 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5677 int nchan = adap->params.arch.nchan;
5679 t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5681 t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5685 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5686 * @adap: the adapter
5687 * @st: holds the counter values
5688 * @sleep_ok: if true we may sleep while awaiting command completion
5690 * Returns the values of TP's RDMA counters.
5692 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5695 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5700 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5701 * @adap: the adapter
5702 * @idx: the port index
5703 * @st: holds the counter values
5704 * @sleep_ok: if true we may sleep while awaiting command completion
5706 * Returns the values of TP's FCoE counters for the selected port.
5708 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5709 struct tp_fcoe_stats *st, bool sleep_ok)
5713 t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5716 t4_tp_mib_read(adap, &st->frames_drop, 1,
5717 TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5719 t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
5722 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5726 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5727 * @adap: the adapter
5728 * @st: holds the counter values
5729 * @sleep_ok: if true we may sleep while awaiting command completion
5731 * Returns the values of TP's counters for non-TCP directly-placed packets.
5733 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5738 t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5739 st->frames = val[0];
5741 st->octets = ((u64)val[2] << 32) | val[3];
5745 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5746 * @adap: the adapter
5747 * @mtus: where to store the MTU values
5748 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5750 * Reads the HW path MTU table.
5752 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5757 for (i = 0; i < NMTUS; ++i) {
5758 t4_write_reg(adap, TP_MTU_TABLE_A,
5759 MTUINDEX_V(0xff) | MTUVALUE_V(i));
5760 v = t4_read_reg(adap, TP_MTU_TABLE_A);
5761 mtus[i] = MTUVALUE_G(v);
5763 mtu_log[i] = MTUWIDTH_G(v);
5768 * t4_read_cong_tbl - reads the congestion control table
5769 * @adap: the adapter
5770 * @incr: where to store the alpha values
5772 * Reads the additive increments programmed into the HW congestion
5775 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5777 unsigned int mtu, w;
5779 for (mtu = 0; mtu < NMTUS; ++mtu)
5780 for (w = 0; w < NCCTRL_WIN; ++w) {
5781 t4_write_reg(adap, TP_CCTRL_TABLE_A,
5782 ROWINDEX_V(0xffff) | (mtu << 5) | w);
5783 incr[mtu][w] = (u16)t4_read_reg(adap,
5784 TP_CCTRL_TABLE_A) & 0x1fff;
5789 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5790 * @adap: the adapter
5791 * @addr: the indirect TP register address
5792 * @mask: specifies the field within the register to modify
5793 * @val: new value for the field
5795 * Sets a field of an indirect TP register to the given value.
5797 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5798 unsigned int mask, unsigned int val)
5800 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5801 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5802 t4_write_reg(adap, TP_PIO_DATA_A, val);
5806 * init_cong_ctrl - initialize congestion control parameters
5807 * @a: the alpha values for congestion control
5808 * @b: the beta values for congestion control
5810 * Initialize the congestion control parameters.
5812 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5814 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5839 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5842 b[13] = b[14] = b[15] = b[16] = 3;
5843 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5844 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5849 /* The minimum additive increment value for the congestion control table */
5850 #define CC_MIN_INCR 2U
5853 * t4_load_mtus - write the MTU and congestion control HW tables
5854 * @adap: the adapter
5855 * @mtus: the values for the MTU table
5856 * @alpha: the values for the congestion control alpha parameter
5857 * @beta: the values for the congestion control beta parameter
5859 * Write the HW MTU table with the supplied MTUs and the high-speed
5860 * congestion control table with the supplied alpha, beta, and MTUs.
5861 * We write the two tables together because the additive increments
5862 * depend on the MTUs.
5864 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5865 const unsigned short *alpha, const unsigned short *beta)
5867 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5868 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5869 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5870 28672, 40960, 57344, 81920, 114688, 163840, 229376
5875 for (i = 0; i < NMTUS; ++i) {
5876 unsigned int mtu = mtus[i];
5877 unsigned int log2 = fls(mtu);
5879 if (!(mtu & ((1 << log2) >> 2))) /* round */
5881 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5882 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
5884 for (w = 0; w < NCCTRL_WIN; ++w) {
5887 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5890 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5891 (w << 16) | (beta[w] << 13) | inc);
5896 /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5897 * clocks. The formula is
5899 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5901 * which is equivalent to
5903 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5905 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5907 u64 v = bytes256 * adap->params.vpd.cclk;
5909 return v * 62 + v / 2;
5913 * t4_get_chan_txrate - get the current per channel Tx rates
5914 * @adap: the adapter
5915 * @nic_rate: rates for NIC traffic
5916 * @ofld_rate: rates for offloaded traffic
5918 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5921 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5925 v = t4_read_reg(adap, TP_TX_TRATE_A);
5926 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5927 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5928 if (adap->params.arch.nchan == NCHAN) {
5929 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5930 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5933 v = t4_read_reg(adap, TP_TX_ORATE_A);
5934 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5935 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5936 if (adap->params.arch.nchan == NCHAN) {
5937 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5938 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5943 * t4_set_trace_filter - configure one of the tracing filters
5944 * @adap: the adapter
5945 * @tp: the desired trace filter parameters
5946 * @idx: which filter to configure
5947 * @enable: whether to enable or disable the filter
5949 * Configures one of the tracing filters available in HW. If @enable is
5950 * %0 @tp is not examined and may be %NULL. The user is responsible to
5951 * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
5953 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5954 int idx, int enable)
5956 int i, ofst = idx * 4;
5957 u32 data_reg, mask_reg, cfg;
5960 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5964 cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5965 if (cfg & TRCMULTIFILTER_F) {
5966 /* If multiple tracers are enabled, then maximum
5967 * capture size is 2.5KB (FIFO size of a single channel)
5968 * minus 2 flits for CPL_TRACE_PKT header.
5970 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5973 /* If multiple tracers are disabled, to avoid deadlocks
5974 * maximum packet capture size of 9600 bytes is recommended.
5975 * Also in this mode, only trace0 can be enabled and running.
5977 if (tp->snap_len > 9600 || idx)
5981 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5982 tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5983 tp->min_len > TFMINPKTSIZE_M)
5986 /* stop the tracer we'll be changing */
5987 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5989 idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5990 data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5991 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
5993 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5994 t4_write_reg(adap, data_reg, tp->data[i]);
5995 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5997 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5998 TFCAPTUREMAX_V(tp->snap_len) |
5999 TFMINPKTSIZE_V(tp->min_len));
6000 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
6001 TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
6002 (is_t4(adap->params.chip) ?
6003 TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
6004 T5_TFPORT_V(tp->port) | T5_TFEN_F |
6005 T5_TFINVERTMATCH_V(tp->invert)));
6011 * t4_get_trace_filter - query one of the tracing filters
6012 * @adap: the adapter
6013 * @tp: the current trace filter parameters
6014 * @idx: which trace filter to query
6015 * @enabled: non-zero if the filter is enabled
6017 * Returns the current settings of one of the HW tracing filters.
6019 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6023 int i, ofst = idx * 4;
6024 u32 data_reg, mask_reg;
6026 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
6027 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
6029 if (is_t4(adap->params.chip)) {
6030 *enabled = !!(ctla & TFEN_F);
6031 tp->port = TFPORT_G(ctla);
6032 tp->invert = !!(ctla & TFINVERTMATCH_F);
6034 *enabled = !!(ctla & T5_TFEN_F);
6035 tp->port = T5_TFPORT_G(ctla);
6036 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
6038 tp->snap_len = TFCAPTUREMAX_G(ctlb);
6039 tp->min_len = TFMINPKTSIZE_G(ctlb);
6040 tp->skip_ofst = TFOFFSET_G(ctla);
6041 tp->skip_len = TFLENGTH_G(ctla);
6043 ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
6044 data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
6045 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
6047 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6048 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6049 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6054 * t4_pmtx_get_stats - returns the HW stats from PMTX
6055 * @adap: the adapter
6056 * @cnt: where to store the count statistics
6057 * @cycles: where to store the cycle statistics
6059 * Returns performance statistics from PMTX.
6061 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6066 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6067 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
6068 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
6069 if (is_t4(adap->params.chip)) {
6070 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
6072 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
6073 PM_TX_DBG_DATA_A, data, 2,
6074 PM_TX_DBG_STAT_MSB_A);
6075 cycles[i] = (((u64)data[0] << 32) | data[1]);
6081 * t4_pmrx_get_stats - returns the HW stats from PMRX
6082 * @adap: the adapter
6083 * @cnt: where to store the count statistics
6084 * @cycles: where to store the cycle statistics
6086 * Returns performance statistics from PMRX.
6088 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6093 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6094 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
6095 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
6096 if (is_t4(adap->params.chip)) {
6097 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
6099 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
6100 PM_RX_DBG_DATA_A, data, 2,
6101 PM_RX_DBG_STAT_MSB_A);
6102 cycles[i] = (((u64)data[0] << 32) | data[1]);
6108 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
6109 * @adapter: the adapter
6110 * @pidx: the port index
6112 * Computes and returns a bitmap indicating which MPS buffer groups are
6113 * associated with the given Port. Bit i is set if buffer group i is
6116 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
6119 unsigned int chip_version, nports;
6121 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6122 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6124 switch (chip_version) {
6129 case 2: return 3 << (2 * pidx);
6130 case 4: return 1 << pidx;
6136 case 2: return 1 << (2 * pidx);
6141 dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6142 chip_version, nports);
6148 * t4_get_mps_bg_map - return the buffer groups associated with a port
6149 * @adapter: the adapter
6150 * @pidx: the port index
6152 * Returns a bitmap indicating which MPS buffer groups are associated
6153 * with the given Port. Bit i is set if buffer group i is used by the
6156 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
6159 unsigned int nports;
6161 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6162 if (pidx >= nports) {
6163 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
6168 /* If we've already retrieved/computed this, just return the result.
6170 mps_bg_map = adapter->params.mps_bg_map;
6171 if (mps_bg_map[pidx])
6172 return mps_bg_map[pidx];
6174 /* Newer Firmware can tell us what the MPS Buffer Group Map is.
6175 * If we're talking to such Firmware, let it tell us. If the new
6176 * API isn't supported, revert back to old hardcoded way. The value
6177 * obtained from Firmware is encoded in below format:
6179 * val = (( MPSBGMAP[Port 3] << 24 ) |
6180 * ( MPSBGMAP[Port 2] << 16 ) |
6181 * ( MPSBGMAP[Port 1] << 8 ) |
6182 * ( MPSBGMAP[Port 0] << 0 ))
6184 if (adapter->flags & CXGB4_FW_OK) {
6188 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6189 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
6190 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6191 0, 1, ¶m, &val);
6195 /* Store the BG Map for all of the Ports in order to
6196 * avoid more calls to the Firmware in the future.
6198 for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
6199 mps_bg_map[p] = val & 0xff;
6201 return mps_bg_map[pidx];
6205 /* Either we're not talking to the Firmware or we're dealing with
6206 * older Firmware which doesn't support the new API to get the MPS
6207 * Buffer Group Map. Fall back to computing it ourselves.
6209 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
6210 return mps_bg_map[pidx];
6214 * t4_get_tp_e2c_map - return the E2C channel map associated with a port
6215 * @adapter: the adapter
6216 * @pidx: the port index
6218 static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6220 unsigned int nports;
6224 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6225 if (pidx >= nports) {
6226 CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n",
6231 /* FW version >= 1.16.44.0 can determine E2C channel map using
6232 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6234 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6235 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPCHMAP));
6236 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6237 0, 1, ¶m, &val);
6239 return (val >> (8 * pidx)) & 0xff;
6245 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6246 * @adap: the adapter
6247 * @pidx: the port index
6249 * Returns a bitmap indicating which TP Ingress Channels are associated
6250 * with a given Port. Bit i is set if TP Ingress Channel i is used by
6253 unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6255 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6256 unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6258 if (pidx >= nports) {
6259 dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
6264 switch (chip_version) {
6267 /* Note that this happens to be the same values as the MPS
6268 * Buffer Group Map for these Chips. But we replicate the code
6269 * here because they're really separate concepts.
6273 case 2: return 3 << (2 * pidx);
6274 case 4: return 1 << pidx;
6281 case 2: return 1 << pidx;
6286 dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6287 chip_version, nports);
6292 * t4_get_port_type_description - return Port Type string description
6293 * @port_type: firmware Port Type enumeration
6295 const char *t4_get_port_type_description(enum fw_port_type port_type)
6297 static const char *const port_type_description[] = {
6323 if (port_type < ARRAY_SIZE(port_type_description))
6324 return port_type_description[port_type];
6329 * t4_get_port_stats_offset - collect port stats relative to a previous
6331 * @adap: The adapter
6333 * @stats: Current stats to fill
6334 * @offset: Previous stats snapshot
6336 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6337 struct port_stats *stats,
6338 struct port_stats *offset)
6343 t4_get_port_stats(adap, idx, stats);
6344 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6345 i < (sizeof(struct port_stats) / sizeof(u64));
6351 * t4_get_port_stats - collect port statistics
6352 * @adap: the adapter
6353 * @idx: the port index
6354 * @p: the stats structure to fill
6356 * Collect statistics related to the given port from HW.
6358 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6360 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6361 u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
6363 #define GET_STAT(name) \
6364 t4_read_reg64(adap, \
6365 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
6366 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
6367 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6369 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6370 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6371 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6372 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6373 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6374 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6375 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6376 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6377 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6378 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6379 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6380 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6381 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6382 p->tx_drop = GET_STAT(TX_PORT_DROP);
6383 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6384 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6385 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6386 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6387 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6388 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6389 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6390 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6391 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
6393 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6394 if (stat_ctl & COUNTPAUSESTATTX_F)
6395 p->tx_frames_64 -= p->tx_pause;
6396 if (stat_ctl & COUNTPAUSEMCTX_F)
6397 p->tx_mcast_frames -= p->tx_pause;
6399 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6400 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6401 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6402 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6403 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6404 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6405 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6406 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6407 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6408 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6409 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6410 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6411 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6412 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6413 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6414 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6415 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6416 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6417 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6418 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6419 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6420 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6421 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6422 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6423 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6424 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6425 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
6427 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6428 if (stat_ctl & COUNTPAUSESTATRX_F)
6429 p->rx_frames_64 -= p->rx_pause;
6430 if (stat_ctl & COUNTPAUSEMCRX_F)
6431 p->rx_mcast_frames -= p->rx_pause;
6434 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6435 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6436 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6437 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6438 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6439 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6440 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6441 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6448 * t4_get_lb_stats - collect loopback port statistics
6449 * @adap: the adapter
6450 * @idx: the loopback port index
6451 * @p: the stats structure to fill
6453 * Return HW statistics for the given loopback port.
6455 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6457 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6459 #define GET_STAT(name) \
6460 t4_read_reg64(adap, \
6461 (is_t4(adap->params.chip) ? \
6462 PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
6463 T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
6464 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6466 p->octets = GET_STAT(BYTES);
6467 p->frames = GET_STAT(FRAMES);
6468 p->bcast_frames = GET_STAT(BCAST);
6469 p->mcast_frames = GET_STAT(MCAST);
6470 p->ucast_frames = GET_STAT(UCAST);
6471 p->error_frames = GET_STAT(ERROR);
6473 p->frames_64 = GET_STAT(64B);
6474 p->frames_65_127 = GET_STAT(65B_127B);
6475 p->frames_128_255 = GET_STAT(128B_255B);
6476 p->frames_256_511 = GET_STAT(256B_511B);
6477 p->frames_512_1023 = GET_STAT(512B_1023B);
6478 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6479 p->frames_1519_max = GET_STAT(1519B_MAX);
6480 p->drop = GET_STAT(DROP_FRAMES);
6482 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6483 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6484 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6485 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6486 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6487 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6488 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6489 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6495 /* t4_mk_filtdelwr - create a delete filter WR
6496 * @ftid: the filter ID
6497 * @wr: the filter work request to populate
6498 * @qid: ingress queue to receive the delete notification
6500 * Creates a filter work request to delete the supplied filter. If @qid is
6501 * negative the delete notification is suppressed.
6503 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6505 memset(wr, 0, sizeof(*wr));
6506 wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6507 wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6508 wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6509 FW_FILTER_WR_NOREPLY_V(qid < 0));
6510 wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
6512 wr->rx_chan_rx_rpl_iq =
6513 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
6516 #define INIT_CMD(var, cmd, rd_wr) do { \
6517 (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
6518 FW_CMD_REQUEST_F | \
6519 FW_CMD_##rd_wr##_F); \
6520 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
6523 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6527 struct fw_ldst_cmd c;
6529 memset(&c, 0, sizeof(c));
6530 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
6531 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6535 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6536 c.u.addrval.addr = cpu_to_be32(addr);
6537 c.u.addrval.val = cpu_to_be32(val);
6539 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6543 * t4_mdio_rd - read a PHY register through MDIO
6544 * @adap: the adapter
6545 * @mbox: mailbox to use for the FW command
6546 * @phy_addr: the PHY address
6547 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6548 * @reg: the register to read
6549 * @valp: where to store the value
6551 * Issues a FW command through the given mailbox to read a PHY register.
6553 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6554 unsigned int mmd, unsigned int reg, u16 *valp)
6558 struct fw_ldst_cmd c;
6560 memset(&c, 0, sizeof(c));
6561 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6562 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6563 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6565 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6566 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6567 FW_LDST_CMD_MMD_V(mmd));
6568 c.u.mdio.raddr = cpu_to_be16(reg);
6570 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6572 *valp = be16_to_cpu(c.u.mdio.rval);
6577 * t4_mdio_wr - write a PHY register through MDIO
6578 * @adap: the adapter
6579 * @mbox: mailbox to use for the FW command
6580 * @phy_addr: the PHY address
6581 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6582 * @reg: the register to write
6583 * @val: value to write
6585 * Issues a FW command through the given mailbox to write a PHY register.
6587 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6588 unsigned int mmd, unsigned int reg, u16 val)
6591 struct fw_ldst_cmd c;
6593 memset(&c, 0, sizeof(c));
6594 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6595 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6596 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6598 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6599 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6600 FW_LDST_CMD_MMD_V(mmd));
6601 c.u.mdio.raddr = cpu_to_be16(reg);
6602 c.u.mdio.rval = cpu_to_be16(val);
6604 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6608 * t4_sge_decode_idma_state - decode the idma state
6609 * @adapter: the adapter
6610 * @state: the state idma is stuck in
6612 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6614 static const char * const t4_decode[] = {
6616 "IDMA_PUSH_MORE_CPL_FIFO",
6617 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6619 "IDMA_PHYSADDR_SEND_PCIEHDR",
6620 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6621 "IDMA_PHYSADDR_SEND_PAYLOAD",
6622 "IDMA_SEND_FIFO_TO_IMSG",
6623 "IDMA_FL_REQ_DATA_FL_PREP",
6624 "IDMA_FL_REQ_DATA_FL",
6626 "IDMA_FL_H_REQ_HEADER_FL",
6627 "IDMA_FL_H_SEND_PCIEHDR",
6628 "IDMA_FL_H_PUSH_CPL_FIFO",
6629 "IDMA_FL_H_SEND_CPL",
6630 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6631 "IDMA_FL_H_SEND_IP_HDR",
6632 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6633 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6634 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6635 "IDMA_FL_D_SEND_PCIEHDR",
6636 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6637 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6638 "IDMA_FL_SEND_PCIEHDR",
6639 "IDMA_FL_PUSH_CPL_FIFO",
6641 "IDMA_FL_SEND_PAYLOAD_FIRST",
6642 "IDMA_FL_SEND_PAYLOAD",
6643 "IDMA_FL_REQ_NEXT_DATA_FL",
6644 "IDMA_FL_SEND_NEXT_PCIEHDR",
6645 "IDMA_FL_SEND_PADDING",
6646 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6647 "IDMA_FL_SEND_FIFO_TO_IMSG",
6648 "IDMA_FL_REQ_DATAFL_DONE",
6649 "IDMA_FL_REQ_HEADERFL_DONE",
6651 static const char * const t5_decode[] = {
6654 "IDMA_PUSH_MORE_CPL_FIFO",
6655 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6656 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6657 "IDMA_PHYSADDR_SEND_PCIEHDR",
6658 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6659 "IDMA_PHYSADDR_SEND_PAYLOAD",
6660 "IDMA_SEND_FIFO_TO_IMSG",
6661 "IDMA_FL_REQ_DATA_FL",
6663 "IDMA_FL_DROP_SEND_INC",
6664 "IDMA_FL_H_REQ_HEADER_FL",
6665 "IDMA_FL_H_SEND_PCIEHDR",
6666 "IDMA_FL_H_PUSH_CPL_FIFO",
6667 "IDMA_FL_H_SEND_CPL",
6668 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6669 "IDMA_FL_H_SEND_IP_HDR",
6670 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6671 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6672 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6673 "IDMA_FL_D_SEND_PCIEHDR",
6674 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6675 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6676 "IDMA_FL_SEND_PCIEHDR",
6677 "IDMA_FL_PUSH_CPL_FIFO",
6679 "IDMA_FL_SEND_PAYLOAD_FIRST",
6680 "IDMA_FL_SEND_PAYLOAD",
6681 "IDMA_FL_REQ_NEXT_DATA_FL",
6682 "IDMA_FL_SEND_NEXT_PCIEHDR",
6683 "IDMA_FL_SEND_PADDING",
6684 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6686 static const char * const t6_decode[] = {
6688 "IDMA_PUSH_MORE_CPL_FIFO",
6689 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6690 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6691 "IDMA_PHYSADDR_SEND_PCIEHDR",
6692 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6693 "IDMA_PHYSADDR_SEND_PAYLOAD",
6694 "IDMA_FL_REQ_DATA_FL",
6696 "IDMA_FL_DROP_SEND_INC",
6697 "IDMA_FL_H_REQ_HEADER_FL",
6698 "IDMA_FL_H_SEND_PCIEHDR",
6699 "IDMA_FL_H_PUSH_CPL_FIFO",
6700 "IDMA_FL_H_SEND_CPL",
6701 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6702 "IDMA_FL_H_SEND_IP_HDR",
6703 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6704 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6705 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6706 "IDMA_FL_D_SEND_PCIEHDR",
6707 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6708 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6709 "IDMA_FL_SEND_PCIEHDR",
6710 "IDMA_FL_PUSH_CPL_FIFO",
6712 "IDMA_FL_SEND_PAYLOAD_FIRST",
6713 "IDMA_FL_SEND_PAYLOAD",
6714 "IDMA_FL_REQ_NEXT_DATA_FL",
6715 "IDMA_FL_SEND_NEXT_PCIEHDR",
6716 "IDMA_FL_SEND_PADDING",
6717 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6719 static const u32 sge_regs[] = {
6720 SGE_DEBUG_DATA_LOW_INDEX_2_A,
6721 SGE_DEBUG_DATA_LOW_INDEX_3_A,
6722 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
6724 const char **sge_idma_decode;
6725 int sge_idma_decode_nstates;
6727 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6729 /* Select the right set of decode strings to dump depending on the
6730 * adapter chip type.
6732 switch (chip_version) {
6734 sge_idma_decode = (const char **)t4_decode;
6735 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6739 sge_idma_decode = (const char **)t5_decode;
6740 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6744 sge_idma_decode = (const char **)t6_decode;
6745 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6749 dev_err(adapter->pdev_dev,
6750 "Unsupported chip version %d\n", chip_version);
6754 if (is_t4(adapter->params.chip)) {
6755 sge_idma_decode = (const char **)t4_decode;
6756 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6758 sge_idma_decode = (const char **)t5_decode;
6759 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6762 if (state < sge_idma_decode_nstates)
6763 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6765 CH_WARN(adapter, "idma state %d unknown\n", state);
6767 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6768 CH_WARN(adapter, "SGE register %#x value %#x\n",
6769 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6773 * t4_sge_ctxt_flush - flush the SGE context cache
6774 * @adap: the adapter
6775 * @mbox: mailbox to use for the FW command
6776 * @ctxt_type: Egress or Ingress
6778 * Issues a FW command through the given mailbox to flush the
6779 * SGE context cache.
6781 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
6785 struct fw_ldst_cmd c;
6787 memset(&c, 0, sizeof(c));
6788 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
6789 FW_LDST_ADDRSPC_SGE_EGRC :
6790 FW_LDST_ADDRSPC_SGE_INGC);
6791 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6792 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6794 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6795 c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6797 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6802 * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
6803 * @adap: the adapter
6804 * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
6805 * @dbqtimers: SGE Doorbell Queue Timer table
6807 * Reads the SGE Doorbell Queue Timer values into the provided table.
6808 * Returns 0 on success (Firmware and Hardware support this feature),
6809 * an error on failure.
6811 int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
6814 int ret, dbqtimerix;
6818 while (dbqtimerix < ndbqtimers) {
6820 u32 params[7], vals[7];
6822 nparams = ndbqtimers - dbqtimerix;
6823 if (nparams > ARRAY_SIZE(params))
6824 nparams = ARRAY_SIZE(params);
6826 for (param = 0; param < nparams; param++)
6828 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6829 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
6830 FW_PARAMS_PARAM_Y_V(dbqtimerix + param));
6831 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
6832 nparams, params, vals);
6836 for (param = 0; param < nparams; param++)
6837 dbqtimers[dbqtimerix++] = vals[param];
6843 * t4_fw_hello - establish communication with FW
6844 * @adap: the adapter
6845 * @mbox: mailbox to use for the FW command
6846 * @evt_mbox: mailbox to receive async FW events
6847 * @master: specifies the caller's willingness to be the device master
6848 * @state: returns the current device state (if non-NULL)
6850 * Issues a command to establish communication with FW. Returns either
6851 * an error (negative integer) or the mailbox of the Master PF.
6853 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6854 enum dev_master master, enum dev_state *state)
6857 struct fw_hello_cmd c;
6859 unsigned int master_mbox;
6860 int retries = FW_CMD_HELLO_RETRIES;
6863 memset(&c, 0, sizeof(c));
6864 INIT_CMD(c, HELLO, WRITE);
6865 c.err_to_clearinit = cpu_to_be32(
6866 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
6867 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
6868 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
6869 mbox : FW_HELLO_CMD_MBMASTER_M) |
6870 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
6871 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
6872 FW_HELLO_CMD_CLEARINIT_F);
6875 * Issue the HELLO command to the firmware. If it's not successful
6876 * but indicates that we got a "busy" or "timeout" condition, retry
6877 * the HELLO until we exhaust our retry limit. If we do exceed our
6878 * retry limit, check to see if the firmware left us any error
6879 * information and report that if so.
6881 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6883 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6885 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
6886 t4_report_fw_error(adap);
6890 v = be32_to_cpu(c.err_to_clearinit);
6891 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
6893 if (v & FW_HELLO_CMD_ERR_F)
6894 *state = DEV_STATE_ERR;
6895 else if (v & FW_HELLO_CMD_INIT_F)
6896 *state = DEV_STATE_INIT;
6898 *state = DEV_STATE_UNINIT;
6902 * If we're not the Master PF then we need to wait around for the
6903 * Master PF Driver to finish setting up the adapter.
6905 * Note that we also do this wait if we're a non-Master-capable PF and
6906 * there is no current Master PF; a Master PF may show up momentarily
6907 * and we wouldn't want to fail pointlessly. (This can happen when an
6908 * OS loads lots of different drivers rapidly at the same time). In
6909 * this case, the Master PF returned by the firmware will be
6910 * PCIE_FW_MASTER_M so the test below will work ...
6912 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
6913 master_mbox != mbox) {
6914 int waiting = FW_CMD_HELLO_TIMEOUT;
6917 * Wait for the firmware to either indicate an error or
6918 * initialized state. If we see either of these we bail out
6919 * and report the issue to the caller. If we exhaust the
6920 * "hello timeout" and we haven't exhausted our retries, try
6921 * again. Otherwise bail with a timeout error.
6930 * If neither Error nor Initialized are indicated
6931 * by the firmware keep waiting till we exhaust our
6932 * timeout ... and then retry if we haven't exhausted
6935 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
6936 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
6947 * We either have an Error or Initialized condition
6948 * report errors preferentially.
6951 if (pcie_fw & PCIE_FW_ERR_F)
6952 *state = DEV_STATE_ERR;
6953 else if (pcie_fw & PCIE_FW_INIT_F)
6954 *state = DEV_STATE_INIT;
6958 * If we arrived before a Master PF was selected and
6959 * there's not a valid Master PF, grab its identity
6962 if (master_mbox == PCIE_FW_MASTER_M &&
6963 (pcie_fw & PCIE_FW_MASTER_VLD_F))
6964 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
6973 * t4_fw_bye - end communication with FW
6974 * @adap: the adapter
6975 * @mbox: mailbox to use for the FW command
6977 * Issues a command to terminate communication with FW.
6979 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6981 struct fw_bye_cmd c;
6983 memset(&c, 0, sizeof(c));
6984 INIT_CMD(c, BYE, WRITE);
6985 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6989 * t4_early_init - ask FW to initialize the device
6990 * @adap: the adapter
6991 * @mbox: mailbox to use for the FW command
6993 * Issues a command to FW to partially initialize the device. This
6994 * performs initialization that generally doesn't depend on user input.
6996 int t4_early_init(struct adapter *adap, unsigned int mbox)
6998 struct fw_initialize_cmd c;
7000 memset(&c, 0, sizeof(c));
7001 INIT_CMD(c, INITIALIZE, WRITE);
7002 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7006 * t4_fw_reset - issue a reset to FW
7007 * @adap: the adapter
7008 * @mbox: mailbox to use for the FW command
7009 * @reset: specifies the type of reset to perform
7011 * Issues a reset command of the specified type to FW.
7013 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7015 struct fw_reset_cmd c;
7017 memset(&c, 0, sizeof(c));
7018 INIT_CMD(c, RESET, WRITE);
7019 c.val = cpu_to_be32(reset);
7020 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7024 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7025 * @adap: the adapter
7026 * @mbox: mailbox to use for the FW RESET command (if desired)
7027 * @force: force uP into RESET even if FW RESET command fails
7029 * Issues a RESET command to firmware (if desired) with a HALT indication
7030 * and then puts the microprocessor into RESET state. The RESET command
7031 * will only be issued if a legitimate mailbox is provided (mbox <=
7032 * PCIE_FW_MASTER_M).
7034 * This is generally used in order for the host to safely manipulate the
7035 * adapter without fear of conflicting with whatever the firmware might
7036 * be doing. The only way out of this state is to RESTART the firmware
7039 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7044 * If a legitimate mailbox is provided, issue a RESET command
7045 * with a HALT indication.
7047 if (mbox <= PCIE_FW_MASTER_M) {
7048 struct fw_reset_cmd c;
7050 memset(&c, 0, sizeof(c));
7051 INIT_CMD(c, RESET, WRITE);
7052 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
7053 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
7054 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7058 * Normally we won't complete the operation if the firmware RESET
7059 * command fails but if our caller insists we'll go ahead and put the
7060 * uP into RESET. This can be useful if the firmware is hung or even
7061 * missing ... We'll have to take the risk of putting the uP into
7062 * RESET without the cooperation of firmware in that case.
7064 * We also force the firmware's HALT flag to be on in case we bypassed
7065 * the firmware RESET command above or we're dealing with old firmware
7066 * which doesn't have the HALT capability. This will serve as a flag
7067 * for the incoming firmware to know that it's coming out of a HALT
7068 * rather than a RESET ... if it's new enough to understand that ...
7070 if (ret == 0 || force) {
7071 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
7072 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
7077 * And we always return the result of the firmware RESET command
7078 * even when we force the uP into RESET ...
7084 * t4_fw_restart - restart the firmware by taking the uP out of RESET
7085 * @adap: the adapter
7086 * @mbox: mailbox to use for the FW command
7087 * @reset: if we want to do a RESET to restart things
7089 * Restart firmware previously halted by t4_fw_halt(). On successful
7090 * return the previous PF Master remains as the new PF Master and there
7091 * is no need to issue a new HELLO command, etc.
7093 * We do this in two ways:
7095 * 1. If we're dealing with newer firmware we'll simply want to take
7096 * the chip's microprocessor out of RESET. This will cause the
7097 * firmware to start up from its start vector. And then we'll loop
7098 * until the firmware indicates it's started again (PCIE_FW.HALT
7099 * reset to 0) or we timeout.
7101 * 2. If we're dealing with older firmware then we'll need to RESET
7102 * the chip since older firmware won't recognize the PCIE_FW.HALT
7103 * flag and automatically RESET itself on startup.
7105 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
7109 * Since we're directing the RESET instead of the firmware
7110 * doing it automatically, we need to clear the PCIE_FW.HALT
7113 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
7116 * If we've been given a valid mailbox, first try to get the
7117 * firmware to do the RESET. If that works, great and we can
7118 * return success. Otherwise, if we haven't been given a
7119 * valid mailbox or the RESET command failed, fall back to
7120 * hitting the chip with a hammer.
7122 if (mbox <= PCIE_FW_MASTER_M) {
7123 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
7125 if (t4_fw_reset(adap, mbox,
7126 PIORST_F | PIORSTMODE_F) == 0)
7130 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
7135 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
7136 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7137 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
7148 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7149 * @adap: the adapter
7150 * @mbox: mailbox to use for the FW RESET command (if desired)
7151 * @fw_data: the firmware image to write
7153 * @force: force upgrade even if firmware doesn't cooperate
7155 * Perform all of the steps necessary for upgrading an adapter's
7156 * firmware image. Normally this requires the cooperation of the
7157 * existing firmware in order to halt all existing activities
7158 * but if an invalid mailbox token is passed in we skip that step
7159 * (though we'll still put the adapter microprocessor into RESET in
7162 * On successful return the new firmware will have been loaded and
7163 * the adapter will have been fully RESET losing all previous setup
7164 * state. On unsuccessful return the adapter may be completely hosed ...
7165 * positive errno indicates that the adapter is ~probably~ intact, a
7166 * negative errno indicates that things are looking bad ...
7168 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7169 const u8 *fw_data, unsigned int size, int force)
7171 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7174 if (!t4_fw_matches_chip(adap, fw_hdr))
7177 /* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
7178 * set wont be sent when we are flashing FW.
7180 adap->flags &= ~CXGB4_FW_OK;
7182 ret = t4_fw_halt(adap, mbox, force);
7183 if (ret < 0 && !force)
7186 ret = t4_load_fw(adap, fw_data, size);
7191 * If there was a Firmware Configuration File stored in FLASH,
7192 * there's a good chance that it won't be compatible with the new
7193 * Firmware. In order to prevent difficult to diagnose adapter
7194 * initialization issues, we clear out the Firmware Configuration File
7195 * portion of the FLASH . The user will need to re-FLASH a new
7196 * Firmware Configuration File which is compatible with the new
7197 * Firmware if that's desired.
7199 (void)t4_load_cfg(adap, NULL, 0);
7202 * Older versions of the firmware don't understand the new
7203 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
7204 * restart. So for newly loaded older firmware we'll have to do the
7205 * RESET for it so it starts up on a clean slate. We can tell if
7206 * the newly loaded firmware will handle this right by checking
7207 * its header flags to see if it advertises the capability.
7209 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
7210 ret = t4_fw_restart(adap, mbox, reset);
7212 /* Grab potentially new Firmware Device Log parameters so we can see
7213 * how healthy the new Firmware is. It's okay to contact the new
7214 * Firmware for these parameters even though, as far as it's
7215 * concerned, we've never said "HELLO" to it ...
7217 (void)t4_init_devlog_params(adap);
7219 adap->flags |= CXGB4_FW_OK;
7224 * t4_fl_pkt_align - return the fl packet alignment
7225 * @adap: the adapter
7227 * T4 has a single field to specify the packing and padding boundary.
7228 * T5 onwards has separate fields for this and hence the alignment for
7229 * next packet offset is maximum of these two.
7232 int t4_fl_pkt_align(struct adapter *adap)
7234 u32 sge_control, sge_control2;
7235 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7237 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
7239 /* T4 uses a single control field to specify both the PCIe Padding and
7240 * Packing Boundary. T5 introduced the ability to specify these
7241 * separately. The actual Ingress Packet Data alignment boundary
7242 * within Packed Buffer Mode is the maximum of these two
7243 * specifications. (Note that it makes no real practical sense to
7244 * have the Padding Boundary be larger than the Packing Boundary but you
7245 * could set the chip up that way and, in fact, legacy T4 code would
7246 * end doing this because it would initialize the Padding Boundary and
7247 * leave the Packing Boundary initialized to 0 (16 bytes).)
7248 * Padding Boundary values in T6 starts from 8B,
7249 * where as it is 32B for T4 and T5.
7251 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7252 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
7254 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
7256 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
7258 fl_align = ingpadboundary;
7259 if (!is_t4(adap->params.chip)) {
7260 /* T5 has a weird interpretation of one of the PCIe Packing
7261 * Boundary values. No idea why ...
7263 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
7264 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
7265 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
7266 ingpackboundary = 16;
7268 ingpackboundary = 1 << (ingpackboundary +
7269 INGPACKBOUNDARY_SHIFT_X);
7271 fl_align = max(ingpadboundary, ingpackboundary);
7277 * t4_fixup_host_params - fix up host-dependent parameters
7278 * @adap: the adapter
7279 * @page_size: the host's Base Page Size
7280 * @cache_line_size: the host's Cache Line Size
7282 * Various registers in T4 contain values which are dependent on the
7283 * host's Base Page and Cache Line Sizes. This function will fix all of
7284 * those registers with the appropriate values as passed in ...
7286 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7287 unsigned int cache_line_size)
7289 unsigned int page_shift = fls(page_size) - 1;
7290 unsigned int sge_hps = page_shift - 10;
7291 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7292 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7293 unsigned int fl_align_log = fls(fl_align) - 1;
7295 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
7296 HOSTPAGESIZEPF0_V(sge_hps) |
7297 HOSTPAGESIZEPF1_V(sge_hps) |
7298 HOSTPAGESIZEPF2_V(sge_hps) |
7299 HOSTPAGESIZEPF3_V(sge_hps) |
7300 HOSTPAGESIZEPF4_V(sge_hps) |
7301 HOSTPAGESIZEPF5_V(sge_hps) |
7302 HOSTPAGESIZEPF6_V(sge_hps) |
7303 HOSTPAGESIZEPF7_V(sge_hps));
7305 if (is_t4(adap->params.chip)) {
7306 t4_set_reg_field(adap, SGE_CONTROL_A,
7307 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7308 EGRSTATUSPAGESIZE_F,
7309 INGPADBOUNDARY_V(fl_align_log -
7310 INGPADBOUNDARY_SHIFT_X) |
7311 EGRSTATUSPAGESIZE_V(stat_len != 64));
7313 unsigned int pack_align;
7314 unsigned int ingpad, ingpack;
7316 /* T5 introduced the separation of the Free List Padding and
7317 * Packing Boundaries. Thus, we can select a smaller Padding
7318 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7319 * Bandwidth, and use a Packing Boundary which is large enough
7320 * to avoid false sharing between CPUs, etc.
7322 * For the PCI Link, the smaller the Padding Boundary the
7323 * better. For the Memory Controller, a smaller Padding
7324 * Boundary is better until we cross under the Memory Line
7325 * Size (the minimum unit of transfer to/from Memory). If we
7326 * have a Padding Boundary which is smaller than the Memory
7327 * Line Size, that'll involve a Read-Modify-Write cycle on the
7328 * Memory Controller which is never good.
7331 /* We want the Packing Boundary to be based on the Cache Line
7332 * Size in order to help avoid False Sharing performance
7333 * issues between CPUs, etc. We also want the Packing
7334 * Boundary to incorporate the PCI-E Maximum Payload Size. We
7335 * get best performance when the Packing Boundary is a
7336 * multiple of the Maximum Payload Size.
7338 pack_align = fl_align;
7339 if (pci_is_pcie(adap->pdev)) {
7340 unsigned int mps, mps_log;
7343 /* The PCIe Device Control Maximum Payload Size field
7344 * [bits 7:5] encodes sizes as powers of 2 starting at
7347 pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
7349 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7351 if (mps > pack_align)
7355 /* N.B. T5/T6 have a crazy special interpretation of the "0"
7356 * value for the Packing Boundary. This corresponds to 16
7357 * bytes instead of the expected 32 bytes. So if we want 32
7358 * bytes, the best we can really do is 64 bytes ...
7360 if (pack_align <= 16) {
7361 ingpack = INGPACKBOUNDARY_16B_X;
7363 } else if (pack_align == 32) {
7364 ingpack = INGPACKBOUNDARY_64B_X;
7367 unsigned int pack_align_log = fls(pack_align) - 1;
7369 ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
7370 fl_align = pack_align;
7373 /* Use the smallest Ingress Padding which isn't smaller than
7374 * the Memory Controller Read/Write Size. We'll take that as
7375 * being 8 bytes since we don't know of any system with a
7376 * wider Memory Controller Bus Width.
7378 if (is_t5(adap->params.chip))
7379 ingpad = INGPADBOUNDARY_32B_X;
7381 ingpad = T6_INGPADBOUNDARY_8B_X;
7383 t4_set_reg_field(adap, SGE_CONTROL_A,
7384 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7385 EGRSTATUSPAGESIZE_F,
7386 INGPADBOUNDARY_V(ingpad) |
7387 EGRSTATUSPAGESIZE_V(stat_len != 64));
7388 t4_set_reg_field(adap, SGE_CONTROL2_A,
7389 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
7390 INGPACKBOUNDARY_V(ingpack));
7393 * Adjust various SGE Free List Host Buffer Sizes.
7395 * This is something of a crock since we're using fixed indices into
7396 * the array which are also known by the sge.c code and the T4
7397 * Firmware Configuration File. We need to come up with a much better
7398 * approach to managing this array. For now, the first four entries
7403 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7404 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7406 * For the single-MTU buffers in unpacked mode we need to include
7407 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7408 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7409 * Padding boundary. All of these are accommodated in the Factory
7410 * Default Firmware Configuration File but we need to adjust it for
7411 * this host's cache line size.
7413 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
7414 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
7415 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
7417 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
7418 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
7421 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
7427 * t4_fw_initialize - ask FW to initialize the device
7428 * @adap: the adapter
7429 * @mbox: mailbox to use for the FW command
7431 * Issues a command to FW to partially initialize the device. This
7432 * performs initialization that generally doesn't depend on user input.
7434 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7436 struct fw_initialize_cmd c;
7438 memset(&c, 0, sizeof(c));
7439 INIT_CMD(c, INITIALIZE, WRITE);
7440 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7444 * t4_query_params_rw - query FW or device parameters
7445 * @adap: the adapter
7446 * @mbox: mailbox to use for the FW command
7449 * @nparams: the number of parameters
7450 * @params: the parameter names
7451 * @val: the parameter values
7452 * @rw: Write and read flag
7453 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
7455 * Reads the value of FW or device parameters. Up to 7 parameters can be
7458 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7459 unsigned int vf, unsigned int nparams, const u32 *params,
7460 u32 *val, int rw, bool sleep_ok)
7463 struct fw_params_cmd c;
7464 __be32 *p = &c.param[0].mnem;
7469 memset(&c, 0, sizeof(c));
7470 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7471 FW_CMD_REQUEST_F | FW_CMD_READ_F |
7472 FW_PARAMS_CMD_PFN_V(pf) |
7473 FW_PARAMS_CMD_VFN_V(vf));
7474 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7476 for (i = 0; i < nparams; i++) {
7477 *p++ = cpu_to_be32(*params++);
7479 *p = cpu_to_be32(*(val + i));
7483 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7485 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7486 *val++ = be32_to_cpu(*p);
7490 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7491 unsigned int vf, unsigned int nparams, const u32 *params,
7494 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7498 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7499 unsigned int vf, unsigned int nparams, const u32 *params,
7502 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7507 * t4_set_params_timeout - sets FW or device parameters
7508 * @adap: the adapter
7509 * @mbox: mailbox to use for the FW command
7512 * @nparams: the number of parameters
7513 * @params: the parameter names
7514 * @val: the parameter values
7515 * @timeout: the timeout time
7517 * Sets the value of FW or device parameters. Up to 7 parameters can be
7518 * specified at once.
7520 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7521 unsigned int pf, unsigned int vf,
7522 unsigned int nparams, const u32 *params,
7523 const u32 *val, int timeout)
7525 struct fw_params_cmd c;
7526 __be32 *p = &c.param[0].mnem;
7531 memset(&c, 0, sizeof(c));
7532 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7533 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7534 FW_PARAMS_CMD_PFN_V(pf) |
7535 FW_PARAMS_CMD_VFN_V(vf));
7536 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7539 *p++ = cpu_to_be32(*params++);
7540 *p++ = cpu_to_be32(*val++);
7543 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7547 * t4_set_params - sets FW or device parameters
7548 * @adap: the adapter
7549 * @mbox: mailbox to use for the FW command
7552 * @nparams: the number of parameters
7553 * @params: the parameter names
7554 * @val: the parameter values
7556 * Sets the value of FW or device parameters. Up to 7 parameters can be
7557 * specified at once.
7559 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7560 unsigned int vf, unsigned int nparams, const u32 *params,
7563 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7564 FW_CMD_MAX_TIMEOUT);
7568 * t4_cfg_pfvf - configure PF/VF resource limits
7569 * @adap: the adapter
7570 * @mbox: mailbox to use for the FW command
7571 * @pf: the PF being configured
7572 * @vf: the VF being configured
7573 * @txq: the max number of egress queues
7574 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7575 * @rxqi: the max number of interrupt-capable ingress queues
7576 * @rxq: the max number of interruptless ingress queues
7577 * @tc: the PCI traffic class
7578 * @vi: the max number of virtual interfaces
7579 * @cmask: the channel access rights mask for the PF/VF
7580 * @pmask: the port access rights mask for the PF/VF
7581 * @nexact: the maximum number of exact MPS filters
7582 * @rcaps: read capabilities
7583 * @wxcaps: write/execute capabilities
7585 * Configures resource limits and capabilities for a physical or virtual
7588 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7589 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7590 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7591 unsigned int vi, unsigned int cmask, unsigned int pmask,
7592 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7594 struct fw_pfvf_cmd c;
7596 memset(&c, 0, sizeof(c));
7597 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7598 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7599 FW_PFVF_CMD_VFN_V(vf));
7600 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7601 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7602 FW_PFVF_CMD_NIQ_V(rxq));
7603 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7604 FW_PFVF_CMD_PMASK_V(pmask) |
7605 FW_PFVF_CMD_NEQ_V(txq));
7606 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7607 FW_PFVF_CMD_NVI_V(vi) |
7608 FW_PFVF_CMD_NEXACTF_V(nexact));
7609 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7610 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7611 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7612 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7616 * t4_alloc_vi - allocate a virtual interface
7617 * @adap: the adapter
7618 * @mbox: mailbox to use for the FW command
7619 * @port: physical port associated with the VI
7620 * @pf: the PF owning the VI
7621 * @vf: the VF owning the VI
7622 * @nmac: number of MAC addresses needed (1 to 5)
7623 * @mac: the MAC addresses of the VI
7624 * @rss_size: size of RSS table slice associated with this VI
7625 * @vivld: the destination to store the VI Valid value.
7626 * @vin: the destination to store the VIN value.
7628 * Allocates a virtual interface for the given physical port. If @mac is
7629 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7630 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7631 * stored consecutively so the space needed is @nmac * 6 bytes.
7632 * Returns a negative error number or the non-negative VI id.
7634 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7635 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7636 unsigned int *rss_size, u8 *vivld, u8 *vin)
7641 memset(&c, 0, sizeof(c));
7642 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7643 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7644 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7645 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
7646 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
7649 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7654 memcpy(mac, c.mac, sizeof(c.mac));
7657 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7660 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7663 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7666 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7670 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7673 *vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
7676 *vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
7678 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
7682 * t4_free_vi - free a virtual interface
7683 * @adap: the adapter
7684 * @mbox: mailbox to use for the FW command
7685 * @pf: the PF owning the VI
7686 * @vf: the VF owning the VI
7687 * @viid: virtual interface identifiler
7689 * Free a previously allocated virtual interface.
7691 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7692 unsigned int vf, unsigned int viid)
7696 memset(&c, 0, sizeof(c));
7697 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7700 FW_VI_CMD_PFN_V(pf) |
7701 FW_VI_CMD_VFN_V(vf));
7702 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7703 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7705 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7709 * t4_set_rxmode - set Rx properties of a virtual interface
7710 * @adap: the adapter
7711 * @mbox: mailbox to use for the FW command
7713 * @viid_mirror: the mirror VI id
7714 * @mtu: the new MTU or -1
7715 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7716 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7717 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7718 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7719 * @sleep_ok: if true we may sleep while awaiting command completion
7721 * Sets Rx properties of a virtual interface.
7723 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7724 unsigned int viid_mirror, int mtu, int promisc, int all_multi,
7725 int bcast, int vlanex, bool sleep_ok)
7727 struct fw_vi_rxmode_cmd c, c_mirror;
7730 /* convert to FW values */
7732 mtu = FW_RXMODE_MTU_NO_CHG;
7734 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7736 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7738 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7740 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7742 memset(&c, 0, sizeof(c));
7743 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7744 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7745 FW_VI_RXMODE_CMD_VIID_V(viid));
7746 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7748 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7749 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7750 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7751 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7752 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
7755 memcpy(&c_mirror, &c, sizeof(c_mirror));
7756 c_mirror.op_to_viid =
7757 cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7758 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7759 FW_VI_RXMODE_CMD_VIID_V(viid_mirror));
7762 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7767 ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror),
7774 * t4_free_encap_mac_filt - frees MPS entry at given index
7775 * @adap: the adapter
7777 * @idx: index of MPS entry to be freed
7778 * @sleep_ok: call is allowed to sleep
7780 * Frees the MPS entry at supplied index
7782 * Returns a negative error number or zero on success
7784 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
7785 int idx, bool sleep_ok)
7787 struct fw_vi_mac_exact *p;
7788 struct fw_vi_mac_cmd c;
7792 memset(&c, 0, sizeof(c));
7793 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7794 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7796 FW_VI_MAC_CMD_VIID_V(viid));
7797 exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
7798 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7802 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7803 FW_VI_MAC_CMD_IDX_V(idx));
7804 eth_zero_addr(p->macaddr);
7805 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7810 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
7811 * @adap: the adapter
7813 * @addr: the MAC address
7815 * @idx: index of the entry in mps tcam
7816 * @lookup_type: MAC address for inner (1) or outer (0) header
7817 * @port_id: the port index
7818 * @sleep_ok: call is allowed to sleep
7820 * Removes the mac entry at the specified index using raw mac interface.
7822 * Returns a negative error number on failure.
7824 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
7825 const u8 *addr, const u8 *mask, unsigned int idx,
7826 u8 lookup_type, u8 port_id, bool sleep_ok)
7828 struct fw_vi_mac_cmd c;
7829 struct fw_vi_mac_raw *p = &c.u.raw;
7832 memset(&c, 0, sizeof(c));
7833 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7834 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7836 FW_VI_MAC_CMD_VIID_V(viid));
7837 val = FW_CMD_LEN16_V(1) |
7838 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7839 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7840 FW_CMD_LEN16_V(val));
7842 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
7843 FW_VI_MAC_ID_BASED_FREE);
7845 /* Lookup Type. Outer header: 0, Inner header: 1 */
7846 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7847 DATAPORTNUM_V(port_id));
7848 /* Lookup mask and port mask */
7849 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7850 DATAPORTNUM_V(DATAPORTNUM_M));
7852 /* Copy the address and the mask */
7853 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7854 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7856 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7860 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
7861 * @adap: the adapter
7863 * @addr: the MAC address
7865 * @vni: the VNI id for the tunnel protocol
7866 * @vni_mask: mask for the VNI id
7867 * @dip_hit: to enable DIP match for the MPS entry
7868 * @lookup_type: MAC address for inner (1) or outer (0) header
7869 * @sleep_ok: call is allowed to sleep
7871 * Allocates an MPS entry with specified MAC address and VNI value.
7873 * Returns a negative error number or the allocated index for this mac.
7875 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
7876 const u8 *addr, const u8 *mask, unsigned int vni,
7877 unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
7880 struct fw_vi_mac_cmd c;
7881 struct fw_vi_mac_vni *p = c.u.exact_vni;
7885 memset(&c, 0, sizeof(c));
7886 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7887 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7888 FW_VI_MAC_CMD_VIID_V(viid));
7889 val = FW_CMD_LEN16_V(1) |
7890 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
7891 c.freemacs_to_len16 = cpu_to_be32(val);
7892 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7893 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
7894 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7895 memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
7897 p->lookup_type_to_vni =
7898 cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
7899 FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
7900 FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
7901 p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
7902 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7904 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7909 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
7910 * @adap: the adapter
7912 * @addr: the MAC address
7914 * @idx: index at which to add this entry
7915 * @lookup_type: MAC address for inner (1) or outer (0) header
7916 * @port_id: the port index
7917 * @sleep_ok: call is allowed to sleep
7919 * Adds the mac entry at the specified index using raw mac interface.
7921 * Returns a negative error number or the allocated index for this mac.
7923 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
7924 const u8 *addr, const u8 *mask, unsigned int idx,
7925 u8 lookup_type, u8 port_id, bool sleep_ok)
7928 struct fw_vi_mac_cmd c;
7929 struct fw_vi_mac_raw *p = &c.u.raw;
7932 memset(&c, 0, sizeof(c));
7933 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7934 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7935 FW_VI_MAC_CMD_VIID_V(viid));
7936 val = FW_CMD_LEN16_V(1) |
7937 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7938 c.freemacs_to_len16 = cpu_to_be32(val);
7940 /* Specify that this is an inner mac address */
7941 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
7943 /* Lookup Type. Outer header: 0, Inner header: 1 */
7944 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7945 DATAPORTNUM_V(port_id));
7946 /* Lookup mask and port mask */
7947 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7948 DATAPORTNUM_V(DATAPORTNUM_M));
7950 /* Copy the address and the mask */
7951 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7952 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7954 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7956 ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
7965 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7966 * @adap: the adapter
7967 * @mbox: mailbox to use for the FW command
7969 * @free: if true any existing filters for this VI id are first removed
7970 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7971 * @addr: the MAC address(es)
7972 * @idx: where to store the index of each allocated filter
7973 * @hash: pointer to hash address filter bitmap
7974 * @sleep_ok: call is allowed to sleep
7976 * Allocates an exact-match filter for each of the supplied addresses and
7977 * sets it to the corresponding address. If @idx is not %NULL it should
7978 * have at least @naddr entries, each of which will be set to the index of
7979 * the filter allocated for the corresponding MAC address. If a filter
7980 * could not be allocated for an address its index is set to 0xffff.
7981 * If @hash is not %NULL addresses that fail to allocate an exact filter
7982 * are hashed and update the hash filter bitmap pointed at by @hash.
7984 * Returns a negative error number or the number of filters allocated.
7986 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7987 unsigned int viid, bool free, unsigned int naddr,
7988 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7990 int offset, ret = 0;
7991 struct fw_vi_mac_cmd c;
7992 unsigned int nfilters = 0;
7993 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
7994 unsigned int rem = naddr;
7996 if (naddr > max_naddr)
7999 for (offset = 0; offset < naddr ; /**/) {
8000 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
8001 rem : ARRAY_SIZE(c.u.exact));
8002 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8003 u.exact[fw_naddr]), 16);
8004 struct fw_vi_mac_exact *p;
8007 memset(&c, 0, sizeof(c));
8008 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8011 FW_CMD_EXEC_V(free) |
8012 FW_VI_MAC_CMD_VIID_V(viid));
8013 c.freemacs_to_len16 =
8014 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
8015 FW_CMD_LEN16_V(len16));
8017 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8019 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
8020 FW_VI_MAC_CMD_IDX_V(
8021 FW_VI_MAC_ADD_MAC));
8022 memcpy(p->macaddr, addr[offset + i],
8023 sizeof(p->macaddr));
8026 /* It's okay if we run out of space in our MAC address arena.
8027 * Some of the addresses we submit may get stored so we need
8028 * to run through the reply to see what the results were ...
8030 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8031 if (ret && ret != -FW_ENOMEM)
8034 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8035 u16 index = FW_VI_MAC_CMD_IDX_G(
8036 be16_to_cpu(p->valid_to_idx));
8039 idx[offset + i] = (index >= max_naddr ?
8041 if (index < max_naddr)
8045 hash_mac_addr(addr[offset + i]));
8053 if (ret == 0 || ret == -FW_ENOMEM)
8059 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
8060 * @adap: the adapter
8061 * @mbox: mailbox to use for the FW command
8063 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
8064 * @addr: the MAC address(es)
8065 * @sleep_ok: call is allowed to sleep
8067 * Frees the exact-match filter for each of the supplied addresses
8069 * Returns a negative error number or the number of filters freed.
8071 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8072 unsigned int viid, unsigned int naddr,
8073 const u8 **addr, bool sleep_ok)
8075 int offset, ret = 0;
8076 struct fw_vi_mac_cmd c;
8077 unsigned int nfilters = 0;
8078 unsigned int max_naddr = is_t4(adap->params.chip) ?
8079 NUM_MPS_CLS_SRAM_L_INSTANCES :
8080 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8081 unsigned int rem = naddr;
8083 if (naddr > max_naddr)
8086 for (offset = 0; offset < (int)naddr ; /**/) {
8087 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8089 : ARRAY_SIZE(c.u.exact));
8090 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8091 u.exact[fw_naddr]), 16);
8092 struct fw_vi_mac_exact *p;
8095 memset(&c, 0, sizeof(c));
8096 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8100 FW_VI_MAC_CMD_VIID_V(viid));
8101 c.freemacs_to_len16 =
8102 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
8103 FW_CMD_LEN16_V(len16));
8105 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8106 p->valid_to_idx = cpu_to_be16(
8107 FW_VI_MAC_CMD_VALID_F |
8108 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
8109 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8112 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8116 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8117 u16 index = FW_VI_MAC_CMD_IDX_G(
8118 be16_to_cpu(p->valid_to_idx));
8120 if (index < max_naddr)
8134 * t4_change_mac - modifies the exact-match filter for a MAC address
8135 * @adap: the adapter
8136 * @mbox: mailbox to use for the FW command
8138 * @idx: index of existing filter for old value of MAC address, or -1
8139 * @addr: the new MAC address value
8140 * @persist: whether a new MAC allocation should be persistent
8141 * @smt_idx: the destination to store the new SMT index.
8143 * Modifies an exact-match filter and sets it to the new MAC address.
8144 * Note that in general it is not possible to modify the value of a given
8145 * filter so the generic way to modify an address filter is to free the one
8146 * being used by the old address value and allocate a new filter for the
8147 * new address value. @idx can be -1 if the address is a new addition.
8149 * Returns a negative error number or the index of the filter with the new
8152 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8153 int idx, const u8 *addr, bool persist, u8 *smt_idx)
8156 struct fw_vi_mac_cmd c;
8157 struct fw_vi_mac_exact *p = c.u.exact;
8158 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
8160 if (idx < 0) /* new allocation */
8161 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
8162 mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
8164 memset(&c, 0, sizeof(c));
8165 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8166 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
8167 FW_VI_MAC_CMD_VIID_V(viid));
8168 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
8169 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
8170 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
8171 FW_VI_MAC_CMD_IDX_V(idx));
8172 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8174 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8176 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
8177 if (ret >= max_mac_addr)
8180 if (adap->params.viid_smt_extn_support) {
8181 *smt_idx = FW_VI_MAC_CMD_SMTID_G
8182 (be32_to_cpu(c.op_to_viid));
8184 /* In T4/T5, SMT contains 256 SMAC entries
8185 * organized in 128 rows of 2 entries each.
8186 * In T6, SMT contains 256 SMAC entries in
8189 if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
8191 *smt_idx = (viid & FW_VIID_VIN_M) << 1;
8193 *smt_idx = (viid & FW_VIID_VIN_M);
8201 * t4_set_addr_hash - program the MAC inexact-match hash filter
8202 * @adap: the adapter
8203 * @mbox: mailbox to use for the FW command
8205 * @ucast: whether the hash filter should also match unicast addresses
8206 * @vec: the value to be written to the hash filter
8207 * @sleep_ok: call is allowed to sleep
8209 * Sets the 64-bit inexact-match hash filter for a virtual interface.
8211 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8212 bool ucast, u64 vec, bool sleep_ok)
8214 struct fw_vi_mac_cmd c;
8216 memset(&c, 0, sizeof(c));
8217 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8218 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
8219 FW_VI_ENABLE_CMD_VIID_V(viid));
8220 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
8221 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
8223 c.u.hash.hashvec = cpu_to_be64(vec);
8224 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8228 * t4_enable_vi_params - enable/disable a virtual interface
8229 * @adap: the adapter
8230 * @mbox: mailbox to use for the FW command
8232 * @rx_en: 1=enable Rx, 0=disable Rx
8233 * @tx_en: 1=enable Tx, 0=disable Tx
8234 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8236 * Enables/disables a virtual interface. Note that setting DCB Enable
8237 * only makes sense when enabling a Virtual Interface ...
8239 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8240 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8242 struct fw_vi_enable_cmd c;
8244 memset(&c, 0, sizeof(c));
8245 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8246 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8247 FW_VI_ENABLE_CMD_VIID_V(viid));
8248 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
8249 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
8250 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
8252 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8256 * t4_enable_vi - enable/disable a virtual interface
8257 * @adap: the adapter
8258 * @mbox: mailbox to use for the FW command
8260 * @rx_en: 1=enable Rx, 0=disable Rx
8261 * @tx_en: 1=enable Tx, 0=disable Tx
8263 * Enables/disables a virtual interface.
8265 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8266 bool rx_en, bool tx_en)
8268 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8272 * t4_enable_pi_params - enable/disable a Port's Virtual Interface
8273 * @adap: the adapter
8274 * @mbox: mailbox to use for the FW command
8275 * @pi: the Port Information structure
8276 * @rx_en: 1=enable Rx, 0=disable Rx
8277 * @tx_en: 1=enable Tx, 0=disable Tx
8278 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8280 * Enables/disables a Port's Virtual Interface. Note that setting DCB
8281 * Enable only makes sense when enabling a Virtual Interface ...
8282 * If the Virtual Interface enable/disable operation is successful,
8283 * we notify the OS-specific code of a potential Link Status change
8284 * via the OS Contract API t4_os_link_changed().
8286 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
8287 struct port_info *pi,
8288 bool rx_en, bool tx_en, bool dcb_en)
8290 int ret = t4_enable_vi_params(adap, mbox, pi->viid,
8291 rx_en, tx_en, dcb_en);
8294 t4_os_link_changed(adap, pi->port_id,
8295 rx_en && tx_en && pi->link_cfg.link_ok);
8300 * t4_identify_port - identify a VI's port by blinking its LED
8301 * @adap: the adapter
8302 * @mbox: mailbox to use for the FW command
8304 * @nblinks: how many times to blink LED at 2.5 Hz
8306 * Identifies a VI's port by blinking its LED.
8308 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8309 unsigned int nblinks)
8311 struct fw_vi_enable_cmd c;
8313 memset(&c, 0, sizeof(c));
8314 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8315 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8316 FW_VI_ENABLE_CMD_VIID_V(viid));
8317 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
8318 c.blinkdur = cpu_to_be16(nblinks);
8319 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8323 * t4_iq_stop - stop an ingress queue and its FLs
8324 * @adap: the adapter
8325 * @mbox: mailbox to use for the FW command
8326 * @pf: the PF owning the queues
8327 * @vf: the VF owning the queues
8328 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8329 * @iqid: ingress queue id
8330 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8331 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8333 * Stops an ingress queue and its associated FLs, if any. This causes
8334 * any current or future data/messages destined for these queues to be
8337 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8338 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8339 unsigned int fl0id, unsigned int fl1id)
8343 memset(&c, 0, sizeof(c));
8344 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8345 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8346 FW_IQ_CMD_VFN_V(vf));
8347 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
8348 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8349 c.iqid = cpu_to_be16(iqid);
8350 c.fl0id = cpu_to_be16(fl0id);
8351 c.fl1id = cpu_to_be16(fl1id);
8352 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8356 * t4_iq_free - free an ingress queue and its FLs
8357 * @adap: the adapter
8358 * @mbox: mailbox to use for the FW command
8359 * @pf: the PF owning the queues
8360 * @vf: the VF owning the queues
8361 * @iqtype: the ingress queue type
8362 * @iqid: ingress queue id
8363 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8364 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8366 * Frees an ingress queue and its associated FLs, if any.
8368 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8369 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8370 unsigned int fl0id, unsigned int fl1id)
8374 memset(&c, 0, sizeof(c));
8375 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8376 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8377 FW_IQ_CMD_VFN_V(vf));
8378 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
8379 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8380 c.iqid = cpu_to_be16(iqid);
8381 c.fl0id = cpu_to_be16(fl0id);
8382 c.fl1id = cpu_to_be16(fl1id);
8383 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8387 * t4_eth_eq_free - free an Ethernet egress queue
8388 * @adap: the adapter
8389 * @mbox: mailbox to use for the FW command
8390 * @pf: the PF owning the queue
8391 * @vf: the VF owning the queue
8392 * @eqid: egress queue id
8394 * Frees an Ethernet egress queue.
8396 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8397 unsigned int vf, unsigned int eqid)
8399 struct fw_eq_eth_cmd c;
8401 memset(&c, 0, sizeof(c));
8402 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
8403 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8404 FW_EQ_ETH_CMD_PFN_V(pf) |
8405 FW_EQ_ETH_CMD_VFN_V(vf));
8406 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
8407 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
8408 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8412 * t4_ctrl_eq_free - free a control egress queue
8413 * @adap: the adapter
8414 * @mbox: mailbox to use for the FW command
8415 * @pf: the PF owning the queue
8416 * @vf: the VF owning the queue
8417 * @eqid: egress queue id
8419 * Frees a control egress queue.
8421 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8422 unsigned int vf, unsigned int eqid)
8424 struct fw_eq_ctrl_cmd c;
8426 memset(&c, 0, sizeof(c));
8427 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
8428 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8429 FW_EQ_CTRL_CMD_PFN_V(pf) |
8430 FW_EQ_CTRL_CMD_VFN_V(vf));
8431 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
8432 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
8433 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8437 * t4_ofld_eq_free - free an offload egress queue
8438 * @adap: the adapter
8439 * @mbox: mailbox to use for the FW command
8440 * @pf: the PF owning the queue
8441 * @vf: the VF owning the queue
8442 * @eqid: egress queue id
8444 * Frees a control egress queue.
8446 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8447 unsigned int vf, unsigned int eqid)
8449 struct fw_eq_ofld_cmd c;
8451 memset(&c, 0, sizeof(c));
8452 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
8453 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8454 FW_EQ_OFLD_CMD_PFN_V(pf) |
8455 FW_EQ_OFLD_CMD_VFN_V(vf));
8456 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
8457 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
8458 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8462 * t4_link_down_rc_str - return a string for a Link Down Reason Code
8463 * @link_down_rc: Link Down Reason Code
8465 * Returns a string representation of the Link Down Reason Code.
8467 static const char *t4_link_down_rc_str(unsigned char link_down_rc)
8469 static const char * const reason[] = {
8472 "Auto-negotiation Failure",
8474 "Insufficient Airflow",
8475 "Unable To Determine Reason",
8476 "No RX Signal Detected",
8480 if (link_down_rc >= ARRAY_SIZE(reason))
8481 return "Bad Reason Code";
8483 return reason[link_down_rc];
8486 /* Return the highest speed set in the port capabilities, in Mb/s. */
8487 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
8489 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8491 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8495 TEST_SPEED_RETURN(400G, 400000);
8496 TEST_SPEED_RETURN(200G, 200000);
8497 TEST_SPEED_RETURN(100G, 100000);
8498 TEST_SPEED_RETURN(50G, 50000);
8499 TEST_SPEED_RETURN(40G, 40000);
8500 TEST_SPEED_RETURN(25G, 25000);
8501 TEST_SPEED_RETURN(10G, 10000);
8502 TEST_SPEED_RETURN(1G, 1000);
8503 TEST_SPEED_RETURN(100M, 100);
8505 #undef TEST_SPEED_RETURN
8511 * fwcap_to_fwspeed - return highest speed in Port Capabilities
8512 * @acaps: advertised Port Capabilities
8514 * Get the highest speed for the port from the advertised Port
8515 * Capabilities. It will be either the highest speed from the list of
8516 * speeds or whatever user has set using ethtool.
8518 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
8520 #define TEST_SPEED_RETURN(__caps_speed) \
8522 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8523 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8526 TEST_SPEED_RETURN(400G);
8527 TEST_SPEED_RETURN(200G);
8528 TEST_SPEED_RETURN(100G);
8529 TEST_SPEED_RETURN(50G);
8530 TEST_SPEED_RETURN(40G);
8531 TEST_SPEED_RETURN(25G);
8532 TEST_SPEED_RETURN(10G);
8533 TEST_SPEED_RETURN(1G);
8534 TEST_SPEED_RETURN(100M);
8536 #undef TEST_SPEED_RETURN
8542 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8543 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8545 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8546 * 32-bit Port Capabilities value.
8548 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
8550 fw_port_cap32_t linkattr = 0;
8552 /* Unfortunately the format of the Link Status in the old
8553 * 16-bit Port Information message isn't the same as the
8554 * 16-bit Port Capabilities bitfield used everywhere else ...
8556 if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8557 linkattr |= FW_PORT_CAP32_FC_RX;
8558 if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8559 linkattr |= FW_PORT_CAP32_FC_TX;
8560 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8561 linkattr |= FW_PORT_CAP32_SPEED_100M;
8562 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8563 linkattr |= FW_PORT_CAP32_SPEED_1G;
8564 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8565 linkattr |= FW_PORT_CAP32_SPEED_10G;
8566 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8567 linkattr |= FW_PORT_CAP32_SPEED_25G;
8568 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8569 linkattr |= FW_PORT_CAP32_SPEED_40G;
8570 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8571 linkattr |= FW_PORT_CAP32_SPEED_100G;
8577 * t4_handle_get_port_info - process a FW reply message
8578 * @pi: the port info
8579 * @rpl: start of the FW message
8581 * Processes a GET_PORT_INFO FW reply message.
8583 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8585 const struct fw_port_cmd *cmd = (const void *)rpl;
8586 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8587 struct link_config *lc = &pi->link_cfg;
8588 struct adapter *adapter = pi->adapter;
8589 unsigned int speed, fc, fec, adv_fc;
8590 enum fw_port_module_type mod_type;
8591 int action, link_ok, linkdnrc;
8592 enum fw_port_type port_type;
8594 /* Extract the various fields from the Port Information message.
8596 action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
8598 case FW_PORT_ACTION_GET_PORT_INFO: {
8599 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8601 link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8602 linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8603 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8604 mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8605 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8606 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8607 lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8608 linkattr = lstatus_to_fwcap(lstatus);
8612 case FW_PORT_ACTION_GET_PORT_INFO32: {
8615 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8616 link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8617 linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8618 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8619 mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8620 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8621 acaps = be32_to_cpu(cmd->u.info32.acaps32);
8622 lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8623 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8628 dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8629 be32_to_cpu(cmd->action_to_len16));
8633 fec = fwcap_to_cc_fec(acaps);
8634 adv_fc = fwcap_to_cc_pause(acaps);
8635 fc = fwcap_to_cc_pause(linkattr);
8636 speed = fwcap_to_speed(linkattr);
8638 /* Reset state for communicating new Transceiver Module status and
8639 * whether the OS-dependent layer wants us to redo the current
8640 * "sticky" L1 Configure Link Parameters.
8642 lc->new_module = false;
8643 lc->redo_l1cfg = false;
8645 if (mod_type != pi->mod_type) {
8646 /* With the newer SFP28 and QSFP28 Transceiver Module Types,
8647 * various fundamental Port Capabilities which used to be
8648 * immutable can now change radically. We can now have
8649 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8650 * all change based on what Transceiver Module is inserted.
8651 * So we need to record the Physical "Port" Capabilities on
8652 * every Transceiver Module change.
8656 /* When a new Transceiver Module is inserted, the Firmware
8657 * will examine its i2c EPROM to determine its type and
8658 * general operating parameters including things like Forward
8659 * Error Control, etc. Various IEEE 802.3 standards dictate
8660 * how to interpret these i2c values to determine default
8661 * "sutomatic" settings. We record these for future use when
8662 * the user explicitly requests these standards-based values.
8664 lc->def_acaps = acaps;
8666 /* Some versions of the early T6 Firmware "cheated" when
8667 * handling different Transceiver Modules by changing the
8668 * underlaying Port Type reported to the Host Drivers. As
8669 * such we need to capture whatever Port Type the Firmware
8670 * sends us and record it in case it's different from what we
8671 * were told earlier. Unfortunately, since Firmware is
8672 * forever, we'll need to keep this code here forever, but in
8673 * later T6 Firmware it should just be an assignment of the
8674 * same value already recorded.
8676 pi->port_type = port_type;
8678 /* Record new Module Type information.
8680 pi->mod_type = mod_type;
8682 /* Let the OS-dependent layer know if we have a new
8683 * Transceiver Module inserted.
8685 lc->new_module = t4_is_inserted_mod_type(mod_type);
8687 t4_os_portmod_changed(adapter, pi->port_id);
8690 if (link_ok != lc->link_ok || speed != lc->speed ||
8691 fc != lc->fc || adv_fc != lc->advertised_fc ||
8693 /* something changed */
8694 if (!link_ok && lc->link_ok) {
8695 lc->link_down_rc = linkdnrc;
8696 dev_warn_ratelimited(adapter->pdev_dev,
8697 "Port %d link down, reason: %s\n",
8699 t4_link_down_rc_str(linkdnrc));
8701 lc->link_ok = link_ok;
8703 lc->advertised_fc = adv_fc;
8707 lc->lpacaps = lpacaps;
8708 lc->acaps = acaps & ADVERT_MASK;
8710 /* If we're not physically capable of Auto-Negotiation, note
8711 * this as Auto-Negotiation disabled. Otherwise, we track
8712 * what Auto-Negotiation settings we have. Note parallel
8713 * structure in t4_link_l1cfg_core() and init_link_config().
8715 if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
8716 lc->autoneg = AUTONEG_DISABLE;
8717 } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
8718 lc->autoneg = AUTONEG_ENABLE;
8720 /* When Autoneg is disabled, user needs to set
8722 * Similar to cxgb4_ethtool.c: set_link_ksettings
8725 lc->speed_caps = fwcap_to_fwspeed(acaps);
8726 lc->autoneg = AUTONEG_DISABLE;
8729 t4_os_link_changed(adapter, pi->port_id, link_ok);
8732 /* If we have a new Transceiver Module and the OS-dependent code has
8733 * told us that it wants us to redo whatever "sticky" L1 Configuration
8734 * Link Parameters are set, do that now.
8736 if (lc->new_module && lc->redo_l1cfg) {
8737 struct link_config old_lc;
8740 /* Save the current L1 Configuration and restore it if an
8741 * error occurs. We probably should fix the l1_cfg*()
8742 * routines not to change the link_config when an error
8746 ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
8749 dev_warn(adapter->pdev_dev,
8750 "Attempt to update new Transceiver Module settings failed\n");
8753 lc->new_module = false;
8754 lc->redo_l1cfg = false;
8758 * t4_update_port_info - retrieve and update port information if changed
8759 * @pi: the port_info
8761 * We issue a Get Port Information Command to the Firmware and, if
8762 * successful, we check to see if anything is different from what we
8763 * last recorded and update things accordingly.
8765 int t4_update_port_info(struct port_info *pi)
8767 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8768 struct fw_port_cmd port_cmd;
8771 memset(&port_cmd, 0, sizeof(port_cmd));
8772 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8773 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8774 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8775 port_cmd.action_to_len16 = cpu_to_be32(
8776 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8777 ? FW_PORT_ACTION_GET_PORT_INFO
8778 : FW_PORT_ACTION_GET_PORT_INFO32) |
8779 FW_LEN16(port_cmd));
8780 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8781 &port_cmd, sizeof(port_cmd), &port_cmd);
8785 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8790 * t4_get_link_params - retrieve basic link parameters for given port
8792 * @link_okp: value return pointer for link up/down
8793 * @speedp: value return pointer for speed (Mb/s)
8794 * @mtup: value return pointer for mtu
8796 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8797 * and MTU for a specified port. A negative error is returned on
8798 * failure; 0 on success.
8800 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8801 unsigned int *speedp, unsigned int *mtup)
8803 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8804 unsigned int action, link_ok, mtu;
8805 struct fw_port_cmd port_cmd;
8806 fw_port_cap32_t linkattr;
8809 memset(&port_cmd, 0, sizeof(port_cmd));
8810 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8811 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8812 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8813 action = (fw_caps == FW_CAPS16
8814 ? FW_PORT_ACTION_GET_PORT_INFO
8815 : FW_PORT_ACTION_GET_PORT_INFO32);
8816 port_cmd.action_to_len16 = cpu_to_be32(
8817 FW_PORT_CMD_ACTION_V(action) |
8818 FW_LEN16(port_cmd));
8819 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8820 &port_cmd, sizeof(port_cmd), &port_cmd);
8824 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8825 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8827 link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8828 linkattr = lstatus_to_fwcap(lstatus);
8829 mtu = be16_to_cpu(port_cmd.u.info.mtu);
8832 be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8834 link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8835 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8836 mtu = FW_PORT_CMD_MTU32_G(
8837 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8841 *link_okp = link_ok;
8843 *speedp = fwcap_to_speed(linkattr);
8851 * t4_handle_fw_rpl - process a FW reply message
8852 * @adap: the adapter
8853 * @rpl: start of the FW message
8855 * Processes a FW message, such as link state change messages.
8857 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8859 u8 opcode = *(const u8 *)rpl;
8861 /* This might be a port command ... this simplifies the following
8862 * conditionals ... We can get away with pre-dereferencing
8863 * action_to_len16 because it's in the first 16 bytes and all messages
8864 * will be at least that long.
8866 const struct fw_port_cmd *p = (const void *)rpl;
8867 unsigned int action =
8868 FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
8870 if (opcode == FW_PORT_CMD &&
8871 (action == FW_PORT_ACTION_GET_PORT_INFO ||
8872 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8874 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
8875 struct port_info *pi = NULL;
8877 for_each_port(adap, i) {
8878 pi = adap2pinfo(adap, i);
8879 if (pi->tx_chan == chan)
8883 t4_handle_get_port_info(pi, rpl);
8885 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8892 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8896 if (pci_is_pcie(adapter->pdev)) {
8897 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8898 p->speed = val & PCI_EXP_LNKSTA_CLS;
8899 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8904 * init_link_config - initialize a link's SW state
8905 * @lc: pointer to structure holding the link state
8906 * @pcaps: link Port Capabilities
8907 * @acaps: link current Advertised Port Capabilities
8909 * Initializes the SW state maintained for each link, including the link's
8910 * capabilities and default speed/flow-control/autonegotiation settings.
8912 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8913 fw_port_cap32_t acaps)
8916 lc->def_acaps = acaps;
8920 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8922 /* For Forward Error Control, we default to whatever the Firmware
8923 * tells us the Link is currently advertising.
8925 lc->requested_fec = FEC_AUTO;
8926 lc->fec = fwcap_to_cc_fec(lc->def_acaps);
8928 /* If the Port is capable of Auto-Negtotiation, initialize it as
8929 * "enabled" and copy over all of the Physical Port Capabilities
8930 * to the Advertised Port Capabilities. Otherwise mark it as
8931 * Auto-Negotiate disabled and select the highest supported speed
8932 * for the link. Note parallel structure in t4_link_l1cfg_core()
8933 * and t4_handle_get_port_info().
8935 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8936 lc->acaps = lc->pcaps & ADVERT_MASK;
8937 lc->autoneg = AUTONEG_ENABLE;
8938 lc->requested_fc |= PAUSE_AUTONEG;
8941 lc->autoneg = AUTONEG_DISABLE;
8942 lc->speed_caps = fwcap_to_fwspeed(acaps);
8946 #define CIM_PF_NOACCESS 0xeeeeeeee
8948 int t4_wait_dev_ready(void __iomem *regs)
8952 whoami = readl(regs + PL_WHOAMI_A);
8953 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8957 whoami = readl(regs + PL_WHOAMI_A);
8958 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
8962 u32 vendor_and_model_id;
8966 static int t4_get_flash_params(struct adapter *adap)
8968 /* Table for non-Numonix supported flash parts. Numonix parts are left
8969 * to the preexisting code. All flash parts have 64KB sectors.
8971 static struct flash_desc supported_flash[] = {
8972 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8975 unsigned int part, manufacturer;
8976 unsigned int density, size = 0;
8980 /* Issue a Read ID Command to the Flash part. We decode supported
8981 * Flash parts and their sizes from this. There's a newer Query
8982 * Command which can retrieve detailed geometry information but many
8983 * Flash parts don't support it.
8986 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
8988 ret = sf1_read(adap, 3, 0, 1, &flashid);
8989 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
8993 /* Check to see if it's one of our non-standard supported Flash parts.
8995 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8996 if (supported_flash[part].vendor_and_model_id == flashid) {
8997 adap->params.sf_size = supported_flash[part].size_mb;
8998 adap->params.sf_nsec =
8999 adap->params.sf_size / SF_SEC_SIZE;
9003 /* Decode Flash part size. The code below looks repetitive with
9004 * common encodings, but that's not guaranteed in the JEDEC
9005 * specification for the Read JEDEC ID command. The only thing that
9006 * we're guaranteed by the JEDEC specification is where the
9007 * Manufacturer ID is in the returned result. After that each
9008 * Manufacturer ~could~ encode things completely differently.
9009 * Note, all Flash parts must have 64KB sectors.
9011 manufacturer = flashid & 0xff;
9012 switch (manufacturer) {
9013 case 0x20: { /* Micron/Numonix */
9014 /* This Density -> Size decoding table is taken from Micron
9017 density = (flashid >> 16) & 0xff;
9019 case 0x14: /* 1MB */
9022 case 0x15: /* 2MB */
9025 case 0x16: /* 4MB */
9028 case 0x17: /* 8MB */
9031 case 0x18: /* 16MB */
9034 case 0x19: /* 32MB */
9037 case 0x20: /* 64MB */
9040 case 0x21: /* 128MB */
9043 case 0x22: /* 256MB */
9049 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
9050 /* This Density -> Size decoding table is taken from ISSI
9053 density = (flashid >> 16) & 0xff;
9055 case 0x16: /* 32 MB */
9058 case 0x17: /* 64MB */
9064 case 0xc2: { /* Macronix */
9065 /* This Density -> Size decoding table is taken from Macronix
9068 density = (flashid >> 16) & 0xff;
9070 case 0x17: /* 8MB */
9073 case 0x18: /* 16MB */
9079 case 0xef: { /* Winbond */
9080 /* This Density -> Size decoding table is taken from Winbond
9083 density = (flashid >> 16) & 0xff;
9085 case 0x17: /* 8MB */
9088 case 0x18: /* 16MB */
9096 /* If we didn't recognize the FLASH part, that's no real issue: the
9097 * Hardware/Software contract says that Hardware will _*ALWAYS*_
9098 * use a FLASH part which is at least 4MB in size and has 64KB
9099 * sectors. The unrecognized FLASH part is likely to be much larger
9100 * than 4MB, but that's all we really need.
9103 dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
9108 /* Store decoded Flash size and fall through into vetting code. */
9109 adap->params.sf_size = size;
9110 adap->params.sf_nsec = size / SF_SEC_SIZE;
9113 if (adap->params.sf_size < FLASH_MIN_SIZE)
9114 dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
9115 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
9120 * t4_prep_adapter - prepare SW and HW for operation
9121 * @adapter: the adapter
9123 * Initialize adapter SW state for the various HW modules, set initial
9124 * values for some adapter tunables, take PHYs out of reset, and
9125 * initialize the MDIO interface.
9127 int t4_prep_adapter(struct adapter *adapter)
9133 get_pci_mode(adapter, &adapter->params.pci);
9134 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
9136 ret = t4_get_flash_params(adapter);
9138 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
9142 /* Retrieve adapter's device ID
9144 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
9145 ver = device_id >> 12;
9146 adapter->params.chip = 0;
9149 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
9150 adapter->params.arch.sge_fl_db = DBPRIO_F;
9151 adapter->params.arch.mps_tcam_size =
9152 NUM_MPS_CLS_SRAM_L_INSTANCES;
9153 adapter->params.arch.mps_rplc_size = 128;
9154 adapter->params.arch.nchan = NCHAN;
9155 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9156 adapter->params.arch.vfcount = 128;
9157 /* Congestion map is for 4 channels so that
9158 * MPS can have 4 priority per port.
9160 adapter->params.arch.cng_ch_bits_log = 2;
9163 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
9164 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
9165 adapter->params.arch.mps_tcam_size =
9166 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9167 adapter->params.arch.mps_rplc_size = 128;
9168 adapter->params.arch.nchan = NCHAN;
9169 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9170 adapter->params.arch.vfcount = 128;
9171 adapter->params.arch.cng_ch_bits_log = 2;
9174 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
9175 adapter->params.arch.sge_fl_db = 0;
9176 adapter->params.arch.mps_tcam_size =
9177 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9178 adapter->params.arch.mps_rplc_size = 256;
9179 adapter->params.arch.nchan = 2;
9180 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
9181 adapter->params.arch.vfcount = 256;
9182 /* Congestion map will be for 2 channels so that
9183 * MPS can have 8 priority per port.
9185 adapter->params.arch.cng_ch_bits_log = 3;
9188 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
9193 adapter->params.cim_la_size = CIMLA_SIZE;
9194 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
9197 * Default port for debugging in case we can't reach FW.
9199 adapter->params.nports = 1;
9200 adapter->params.portvec = 1;
9201 adapter->params.vpd.cclk = 50000;
9203 /* Set PCIe completion timeout to 4 seconds. */
9204 pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
9205 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
9210 * t4_shutdown_adapter - shut down adapter, host & wire
9211 * @adapter: the adapter
9213 * Perform an emergency shutdown of the adapter and stop it from
9214 * continuing any further communication on the ports or DMA to the
9215 * host. This is typically used when the adapter and/or firmware
9216 * have crashed and we want to prevent any further accidental
9217 * communication with the rest of the world. This will also force
9218 * the port Link Status to go down -- if register writes work --
9219 * which should help our peers figure out that we're down.
9221 int t4_shutdown_adapter(struct adapter *adapter)
9225 t4_intr_disable(adapter);
9226 t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
9227 for_each_port(adapter, port) {
9228 u32 a_port_cfg = is_t4(adapter->params.chip) ?
9229 PORT_REG(port, XGMAC_PORT_CFG_A) :
9230 T5_PORT_REG(port, MAC_PORT_CFG_A);
9232 t4_write_reg(adapter, a_port_cfg,
9233 t4_read_reg(adapter, a_port_cfg)
9234 & ~SIGNAL_DET_V(1));
9236 t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
9242 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
9243 * @adapter: the adapter
9244 * @qid: the Queue ID
9245 * @qtype: the Ingress or Egress type for @qid
9246 * @user: true if this request is for a user mode queue
9247 * @pbar2_qoffset: BAR2 Queue Offset
9248 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
9250 * Returns the BAR2 SGE Queue Registers information associated with the
9251 * indicated Absolute Queue ID. These are passed back in return value
9252 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
9253 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
9255 * This may return an error which indicates that BAR2 SGE Queue
9256 * registers aren't available. If an error is not returned, then the
9257 * following values are returned:
9259 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
9260 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
9262 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
9263 * require the "Inferred Queue ID" ability may be used. E.g. the
9264 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
9265 * then these "Inferred Queue ID" register may not be used.
9267 int t4_bar2_sge_qregs(struct adapter *adapter,
9269 enum t4_bar2_qtype qtype,
9272 unsigned int *pbar2_qid)
9274 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
9275 u64 bar2_page_offset, bar2_qoffset;
9276 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
9278 /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
9279 if (!user && is_t4(adapter->params.chip))
9282 /* Get our SGE Page Size parameters.
9284 page_shift = adapter->params.sge.hps + 10;
9285 page_size = 1 << page_shift;
9287 /* Get the right Queues per Page parameters for our Queue.
9289 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
9290 ? adapter->params.sge.eq_qpp
9291 : adapter->params.sge.iq_qpp);
9292 qpp_mask = (1 << qpp_shift) - 1;
9294 /* Calculate the basics of the BAR2 SGE Queue register area:
9295 * o The BAR2 page the Queue registers will be in.
9296 * o The BAR2 Queue ID.
9297 * o The BAR2 Queue ID Offset into the BAR2 page.
9299 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
9300 bar2_qid = qid & qpp_mask;
9301 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
9303 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
9304 * hardware will infer the Absolute Queue ID simply from the writes to
9305 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
9306 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
9307 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
9308 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
9309 * from the BAR2 Page and BAR2 Queue ID.
9311 * One important censequence of this is that some BAR2 SGE registers
9312 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
9313 * there. But other registers synthesize the SGE Queue ID purely
9314 * from the writes to the registers -- the Write Combined Doorbell
9315 * Buffer is a good example. These BAR2 SGE Registers are only
9316 * available for those BAR2 SGE Register areas where the SGE Absolute
9317 * Queue ID can be inferred from simple writes.
9319 bar2_qoffset = bar2_page_offset;
9320 bar2_qinferred = (bar2_qid_offset < page_size);
9321 if (bar2_qinferred) {
9322 bar2_qoffset += bar2_qid_offset;
9326 *pbar2_qoffset = bar2_qoffset;
9327 *pbar2_qid = bar2_qid;
9332 * t4_init_devlog_params - initialize adapter->params.devlog
9333 * @adap: the adapter
9335 * Initialize various fields of the adapter's Firmware Device Log
9336 * Parameters structure.
9338 int t4_init_devlog_params(struct adapter *adap)
9340 struct devlog_params *dparams = &adap->params.devlog;
9342 unsigned int devlog_meminfo;
9343 struct fw_devlog_cmd devlog_cmd;
9346 /* If we're dealing with newer firmware, the Device Log Parameters
9347 * are stored in a designated register which allows us to access the
9348 * Device Log even if we can't talk to the firmware.
9351 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
9353 unsigned int nentries, nentries128;
9355 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
9356 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
9358 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
9359 nentries = (nentries128 + 1) * 128;
9360 dparams->size = nentries * sizeof(struct fw_devlog_e);
9365 /* Otherwise, ask the firmware for it's Device Log Parameters.
9367 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9368 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
9369 FW_CMD_REQUEST_F | FW_CMD_READ_F);
9370 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9371 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
9377 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
9378 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
9379 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
9380 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
9386 * t4_init_sge_params - initialize adap->params.sge
9387 * @adapter: the adapter
9389 * Initialize various fields of the adapter's SGE Parameters structure.
9391 int t4_init_sge_params(struct adapter *adapter)
9393 struct sge_params *sge_params = &adapter->params.sge;
9395 unsigned int s_hps, s_qpp;
9397 /* Extract the SGE Page Size for our PF.
9399 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
9400 s_hps = (HOSTPAGESIZEPF0_S +
9401 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
9402 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
9404 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
9406 s_qpp = (QUEUESPERPAGEPF0_S +
9407 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
9408 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
9409 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9410 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
9411 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9417 * t4_init_tp_params - initialize adap->params.tp
9418 * @adap: the adapter
9419 * @sleep_ok: if true we may sleep while awaiting command completion
9421 * Initialize various fields of the adapter's TP Parameters structure.
9423 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9429 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
9430 adap->params.tp.tre = TIMERRESOLUTION_G(v);
9431 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
9433 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
9434 for (chan = 0; chan < NCHAN; chan++)
9435 adap->params.tp.tx_modq[chan] = chan;
9437 /* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
9440 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
9441 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FILTER) |
9442 FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_FILTER_MODE_MASK));
9444 /* Read current value */
9445 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
9448 dev_info(adap->pdev_dev,
9449 "Current filter mode/mask 0x%x:0x%x\n",
9450 FW_PARAMS_PARAM_FILTER_MODE_G(val),
9451 FW_PARAMS_PARAM_FILTER_MASK_G(val));
9452 adap->params.tp.vlan_pri_map =
9453 FW_PARAMS_PARAM_FILTER_MODE_G(val);
9454 adap->params.tp.filter_mask =
9455 FW_PARAMS_PARAM_FILTER_MASK_G(val);
9457 dev_info(adap->pdev_dev,
9458 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
9460 /* Incase of older-fw (which doesn't expose the api
9461 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
9462 * the fw api) combination, fall-back to older method of reading
9463 * the filter mode from indirect-register
9465 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
9466 TP_VLAN_PRI_MAP_A, sleep_ok);
9468 /* With the older-fw and newer-driver combination we might run
9469 * into an issue when user wants to use hash filter region but
9470 * the filter_mask is zero, in this case filter_mask validation
9471 * is tough. To avoid that we set the filter_mask same as filter
9472 * mode, which will behave exactly as the older way of ignoring
9473 * the filter mask validation.
9475 adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
9478 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
9479 TP_INGRESS_CONFIG_A, sleep_ok);
9481 /* For T6, cache the adapter's compressed error vector
9482 * and passing outer header info for encapsulated packets.
9484 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
9485 v = t4_read_reg(adap, TP_OUT_CONFIG_A);
9486 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
9489 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
9490 * shift positions of several elements of the Compressed Filter Tuple
9491 * for this adapter which we need frequently ...
9493 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
9494 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
9495 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
9496 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
9497 adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
9498 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
9500 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
9502 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
9504 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
9506 adap->params.tp.frag_shift = t4_filter_field_shift(adap,
9509 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
9510 * represents the presence of an Outer VLAN instead of a VNIC ID.
9512 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
9513 adap->params.tp.vnic_shift = -1;
9515 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
9516 adap->params.tp.hash_filter_mask = v;
9517 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
9518 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
9523 * t4_filter_field_shift - calculate filter field shift
9524 * @adap: the adapter
9525 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9527 * Return the shift position of a filter field within the Compressed
9528 * Filter Tuple. The filter field is specified via its selection bit
9529 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
9531 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9533 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9537 if ((filter_mode & filter_sel) == 0)
9540 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9541 switch (filter_mode & sel) {
9543 field_shift += FT_FCOE_W;
9546 field_shift += FT_PORT_W;
9549 field_shift += FT_VNIC_ID_W;
9552 field_shift += FT_VLAN_W;
9555 field_shift += FT_TOS_W;
9558 field_shift += FT_PROTOCOL_W;
9561 field_shift += FT_ETHERTYPE_W;
9564 field_shift += FT_MACMATCH_W;
9567 field_shift += FT_MPSHITTYPE_W;
9569 case FRAGMENTATION_F:
9570 field_shift += FT_FRAGMENTATION_W;
9577 int t4_init_rss_mode(struct adapter *adap, int mbox)
9580 struct fw_rss_vi_config_cmd rvc;
9582 memset(&rvc, 0, sizeof(rvc));
9584 for_each_port(adap, i) {
9585 struct port_info *p = adap2pinfo(adap, i);
9588 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
9589 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9590 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
9591 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
9592 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9595 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
9601 * t4_init_portinfo - allocate a virtual interface and initialize port_info
9602 * @pi: the port_info
9603 * @mbox: mailbox to use for the FW command
9604 * @port: physical port associated with the VI
9605 * @pf: the PF owning the VI
9606 * @vf: the VF owning the VI
9607 * @mac: the MAC address of the VI
9609 * Allocates a virtual interface for the given physical port. If @mac is
9610 * not %NULL it contains the MAC address of the VI as assigned by FW.
9611 * @mac should be large enough to hold an Ethernet address.
9612 * Returns < 0 on error.
9614 int t4_init_portinfo(struct port_info *pi, int mbox,
9615 int port, int pf, int vf, u8 mac[])
9617 struct adapter *adapter = pi->adapter;
9618 unsigned int fw_caps = adapter->params.fw_caps_support;
9619 struct fw_port_cmd cmd;
9620 unsigned int rss_size;
9621 enum fw_port_type port_type;
9623 fw_port_cap32_t pcaps, acaps;
9624 u8 vivld = 0, vin = 0;
9627 /* If we haven't yet determined whether we're talking to Firmware
9628 * which knows the new 32-bit Port Capabilities, it's time to find
9629 * out now. This will also tell new Firmware to send us Port Status
9630 * Updates using the new 32-bit Port Capabilities version of the
9631 * Port Information message.
9633 if (fw_caps == FW_CAPS_UNKNOWN) {
9636 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
9637 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
9639 ret = t4_set_params(adapter, mbox, pf, vf, 1, ¶m, &val);
9640 fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
9641 adapter->params.fw_caps_support = fw_caps;
9644 memset(&cmd, 0, sizeof(cmd));
9645 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
9646 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9647 FW_PORT_CMD_PORTID_V(port));
9648 cmd.action_to_len16 = cpu_to_be32(
9649 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
9650 ? FW_PORT_ACTION_GET_PORT_INFO
9651 : FW_PORT_ACTION_GET_PORT_INFO32) |
9653 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
9657 /* Extract the various fields from the Port Information message.
9659 if (fw_caps == FW_CAPS16) {
9660 u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
9662 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
9663 mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
9664 ? FW_PORT_CMD_MDIOADDR_G(lstatus)
9666 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
9667 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
9669 u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
9671 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
9672 mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
9673 ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
9675 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
9676 acaps = be32_to_cpu(cmd.u.info32.acaps32);
9679 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
9687 pi->rss_size = rss_size;
9688 pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);
9690 /* If fw supports returning the VIN as part of FW_VI_CMD,
9691 * save the returned values.
9693 if (adapter->params.viid_smt_extn_support) {
9697 /* Retrieve the values from VIID */
9698 pi->vivld = FW_VIID_VIVLD_G(pi->viid);
9699 pi->vin = FW_VIID_VIN_G(pi->viid);
9702 pi->port_type = port_type;
9703 pi->mdio_addr = mdio_addr;
9704 pi->mod_type = FW_PORT_MOD_TYPE_NA;
9706 init_link_config(&pi->link_cfg, pcaps, acaps);
9710 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9715 for_each_port(adap, i) {
9716 struct port_info *pi = adap2pinfo(adap, i);
9718 while ((adap->params.portvec & (1 << j)) == 0)
9721 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9725 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
9731 int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
9736 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
9748 * t4_read_cimq_cfg - read CIM queue configuration
9749 * @adap: the adapter
9750 * @base: holds the queue base addresses in bytes
9751 * @size: holds the queue sizes in bytes
9752 * @thres: holds the queue full thresholds in bytes
9754 * Returns the current configuration of the CIM queues, starting with
9755 * the IBQs, then the OBQs.
9757 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9760 int cim_num_obq = is_t4(adap->params.chip) ?
9761 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9763 for (i = 0; i < CIM_NUM_IBQ; i++) {
9764 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9766 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9767 /* value is in 256-byte units */
9768 *base++ = CIMQBASE_G(v) * 256;
9769 *size++ = CIMQSIZE_G(v) * 256;
9770 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
9772 for (i = 0; i < cim_num_obq; i++) {
9773 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9775 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9776 /* value is in 256-byte units */
9777 *base++ = CIMQBASE_G(v) * 256;
9778 *size++ = CIMQSIZE_G(v) * 256;
9783 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9784 * @adap: the adapter
9785 * @qid: the queue index
9786 * @data: where to store the queue contents
9787 * @n: capacity of @data in 32-bit words
9789 * Reads the contents of the selected CIM queue starting at address 0 up
9790 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9791 * error and the number of 32-bit words actually read on success.
9793 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9795 int i, err, attempts;
9797 const unsigned int nwords = CIM_IBQ_SIZE * 4;
9799 if (qid > 5 || (n & 3))
9802 addr = qid * nwords;
9806 /* It might take 3-10ms before the IBQ debug read access is allowed.
9807 * Wait for 1 Sec with a delay of 1 usec.
9811 for (i = 0; i < n; i++, addr++) {
9812 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
9814 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9818 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9820 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
9825 * t4_read_cim_obq - read the contents of a CIM outbound queue
9826 * @adap: the adapter
9827 * @qid: the queue index
9828 * @data: where to store the queue contents
9829 * @n: capacity of @data in 32-bit words
9831 * Reads the contents of the selected CIM queue starting at address 0 up
9832 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9833 * error and the number of 32-bit words actually read on success.
9835 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9838 unsigned int addr, v, nwords;
9839 int cim_num_obq = is_t4(adap->params.chip) ?
9840 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9842 if ((qid > (cim_num_obq - 1)) || (n & 3))
9845 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9846 QUENUMSELECT_V(qid));
9847 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9849 addr = CIMQBASE_G(v) * 64; /* muliple of 256 -> muliple of 4 */
9850 nwords = CIMQSIZE_G(v) * 64; /* same */
9854 for (i = 0; i < n; i++, addr++) {
9855 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9857 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9861 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9863 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
9868 * t4_cim_read - read a block from CIM internal address space
9869 * @adap: the adapter
9870 * @addr: the start address within the CIM address space
9871 * @n: number of words to read
9872 * @valp: where to store the result
9874 * Reads a block of 4-byte words from the CIM intenal address space.
9876 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9881 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9884 for ( ; !ret && n--; addr += 4) {
9885 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9886 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9889 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9895 * t4_cim_write - write a block into CIM internal address space
9896 * @adap: the adapter
9897 * @addr: the start address within the CIM address space
9898 * @n: number of words to write
9899 * @valp: set of values to write
9901 * Writes a block of 4-byte words into the CIM intenal address space.
9903 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9904 const unsigned int *valp)
9908 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9911 for ( ; !ret && n--; addr += 4) {
9912 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9913 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9914 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9920 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
9923 return t4_cim_write(adap, addr, 1, &val);
9927 * t4_cim_read_la - read CIM LA capture buffer
9928 * @adap: the adapter
9929 * @la_buf: where to store the LA data
9930 * @wrptr: the HW write pointer within the capture buffer
9932 * Reads the contents of the CIM LA buffer with the most recent entry at
9933 * the end of the returned data and with the entry at @wrptr first.
9934 * We try to leave the LA in the running state we find it in.
9936 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9939 unsigned int cfg, val, idx;
9941 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
9945 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
9946 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
9951 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9955 idx = UPDBGLAWRPTR_G(val);
9959 for (i = 0; i < adap->params.cim_la_size; i++) {
9960 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9961 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
9964 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9967 if (val & UPDBGLARDEN_F) {
9971 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
9975 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9976 * identify the 32-bit portion of the full 312-bit data
9978 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
9979 idx = (idx & 0xff0) + 0x10;
9982 /* address can't exceed 0xfff */
9983 idx &= UPDBGLARDPTR_M;
9986 if (cfg & UPDBGLAEN_F) {
9987 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9988 cfg & ~UPDBGLARDEN_F);
9996 * t4_tp_read_la - read TP LA capture buffer
9997 * @adap: the adapter
9998 * @la_buf: where to store the LA data
9999 * @wrptr: the HW write pointer within the capture buffer
10001 * Reads the contents of the TP LA buffer with the most recent entry at
10002 * the end of the returned data and with the entry at @wrptr first.
10003 * We leave the LA in the running state we find it in.
10005 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
10007 bool last_incomplete;
10008 unsigned int i, cfg, val, idx;
10010 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
10011 if (cfg & DBGLAENABLE_F) /* freeze LA */
10012 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
10013 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
10015 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
10016 idx = DBGLAWPTR_G(val);
10017 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
10018 if (last_incomplete)
10019 idx = (idx + 1) & DBGLARPTR_M;
10024 val &= ~DBGLARPTR_V(DBGLARPTR_M);
10025 val |= adap->params.tp.la_mask;
10027 for (i = 0; i < TPLA_SIZE; i++) {
10028 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
10029 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
10030 idx = (idx + 1) & DBGLARPTR_M;
10033 /* Wipe out last entry if it isn't valid */
10034 if (last_incomplete)
10035 la_buf[TPLA_SIZE - 1] = ~0ULL;
10037 if (cfg & DBGLAENABLE_F) /* restore running state */
10038 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
10039 cfg | adap->params.tp.la_mask);
10042 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
10043 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
10044 * state for more than the Warning Threshold then we'll issue a warning about
10045 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
10046 * appears to be hung every Warning Repeat second till the situation clears.
10047 * If the situation clears, we'll note that as well.
10049 #define SGE_IDMA_WARN_THRESH 1
10050 #define SGE_IDMA_WARN_REPEAT 300
10053 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10054 * @adapter: the adapter
10055 * @idma: the adapter IDMA Monitor state
10057 * Initialize the state of an SGE Ingress DMA Monitor.
10059 void t4_idma_monitor_init(struct adapter *adapter,
10060 struct sge_idma_monitor_state *idma)
10062 /* Initialize the state variables for detecting an SGE Ingress DMA
10063 * hang. The SGE has internal counters which count up on each clock
10064 * tick whenever the SGE finds its Ingress DMA State Engines in the
10065 * same state they were on the previous clock tick. The clock used is
10066 * the Core Clock so we have a limit on the maximum "time" they can
10067 * record; typically a very small number of seconds. For instance,
10068 * with a 600MHz Core Clock, we can only count up to a bit more than
10069 * 7s. So we'll synthesize a larger counter in order to not run the
10070 * risk of having the "timers" overflow and give us the flexibility to
10071 * maintain a Hung SGE State Machine of our own which operates across
10072 * a longer time frame.
10074 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10075 idma->idma_stalled[0] = 0;
10076 idma->idma_stalled[1] = 0;
10080 * t4_idma_monitor - monitor SGE Ingress DMA state
10081 * @adapter: the adapter
10082 * @idma: the adapter IDMA Monitor state
10083 * @hz: number of ticks/second
10084 * @ticks: number of ticks since the last IDMA Monitor call
10086 void t4_idma_monitor(struct adapter *adapter,
10087 struct sge_idma_monitor_state *idma,
10090 int i, idma_same_state_cnt[2];
10092 /* Read the SGE Debug Ingress DMA Same State Count registers. These
10093 * are counters inside the SGE which count up on each clock when the
10094 * SGE finds its Ingress DMA State Engines in the same states they
10095 * were in the previous clock. The counters will peg out at
10096 * 0xffffffff without wrapping around so once they pass the 1s
10097 * threshold they'll stay above that till the IDMA state changes.
10099 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
10100 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
10101 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10103 for (i = 0; i < 2; i++) {
10104 u32 debug0, debug11;
10106 /* If the Ingress DMA Same State Counter ("timer") is less
10107 * than 1s, then we can reset our synthesized Stall Timer and
10108 * continue. If we have previously emitted warnings about a
10109 * potential stalled Ingress Queue, issue a note indicating
10110 * that the Ingress Queue has resumed forward progress.
10112 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
10113 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
10114 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
10115 "resumed after %d seconds\n",
10116 i, idma->idma_qid[i],
10117 idma->idma_stalled[i] / hz);
10118 idma->idma_stalled[i] = 0;
10122 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
10123 * domain. The first time we get here it'll be because we
10124 * passed the 1s Threshold; each additional time it'll be
10125 * because the RX Timer Callback is being fired on its regular
10128 * If the stall is below our Potential Hung Ingress Queue
10129 * Warning Threshold, continue.
10131 if (idma->idma_stalled[i] == 0) {
10132 idma->idma_stalled[i] = hz;
10133 idma->idma_warn[i] = 0;
10135 idma->idma_stalled[i] += ticks;
10136 idma->idma_warn[i] -= ticks;
10139 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
10142 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
10144 if (idma->idma_warn[i] > 0)
10146 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
10148 /* Read and save the SGE IDMA State and Queue ID information.
10149 * We do this every time in case it changes across time ...
10150 * can't be too careful ...
10152 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
10153 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10154 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
10156 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
10157 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10158 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
10160 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
10161 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
10162 i, idma->idma_qid[i], idma->idma_state[i],
10163 idma->idma_stalled[i] / hz,
10165 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
10170 * t4_load_cfg - download config file
10171 * @adap: the adapter
10172 * @cfg_data: the cfg text file to write
10173 * @size: text file size
10175 * Write the supplied config text file to the card's serial flash.
10177 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10179 int ret, i, n, cfg_addr;
10181 unsigned int flash_cfg_start_sec;
10182 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10184 cfg_addr = t4_flash_cfg_addr(adap);
10189 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10191 if (size > FLASH_CFG_MAX_SIZE) {
10192 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
10193 FLASH_CFG_MAX_SIZE);
10197 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
10199 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10200 flash_cfg_start_sec + i - 1);
10201 /* If size == 0 then we're simply erasing the FLASH sectors associated
10202 * with the on-adapter Firmware Configuration File.
10204 if (ret || size == 0)
10207 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10208 for (i = 0; i < size; i += SF_PAGE_SIZE) {
10209 if ((size - i) < SF_PAGE_SIZE)
10213 ret = t4_write_flash(adap, addr, n, cfg_data, true);
10217 addr += SF_PAGE_SIZE;
10218 cfg_data += SF_PAGE_SIZE;
10223 dev_err(adap->pdev_dev, "config file %s failed %d\n",
10224 (size == 0 ? "clear" : "download"), ret);
10229 * t4_set_vf_mac_acl - Set MAC address for the specified VF
10230 * @adapter: The adapter
10231 * @vf: one of the VFs instantiated by the specified PF
10232 * @naddr: the number of MAC addresses
10233 * @addr: the MAC address(es) to be set to the specified VF
10235 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
10236 unsigned int naddr, u8 *addr)
10238 struct fw_acl_mac_cmd cmd;
10240 memset(&cmd, 0, sizeof(cmd));
10241 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
10244 FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
10245 FW_ACL_MAC_CMD_VFN_V(vf));
10247 /* Note: Do not enable the ACL */
10248 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
10251 switch (adapter->pf) {
10253 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
10256 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
10259 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
10262 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
10266 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
10270 * t4_read_pace_tbl - read the pace table
10271 * @adap: the adapter
10272 * @pace_vals: holds the returned values
10274 * Returns the values of TP's pace table in microseconds.
10276 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10280 for (i = 0; i < NTX_SCHED; i++) {
10281 t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
10282 v = t4_read_reg(adap, TP_PACE_TABLE_A);
10283 pace_vals[i] = dack_ticks_to_usec(adap, v);
10288 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10289 * @adap: the adapter
10290 * @sched: the scheduler index
10291 * @kbps: the byte rate in Kbps
10292 * @ipg: the interpacket delay in tenths of nanoseconds
10293 * @sleep_ok: if true we may sleep while awaiting command completion
10295 * Return the current configuration of a HW Tx scheduler.
10297 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
10298 unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
10300 unsigned int v, addr, bpt, cpt;
10303 addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
10304 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10307 bpt = (v >> 8) & 0xff;
10310 *kbps = 0; /* scheduler disabled */
10312 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
10313 *kbps = (v * bpt) / 125;
10317 addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
10318 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10322 *ipg = (10000 * v) / core_ticks_per_usec(adap);
10326 /* t4_sge_ctxt_rd - read an SGE context through FW
10327 * @adap: the adapter
10328 * @mbox: mailbox to use for the FW command
10329 * @cid: the context id
10330 * @ctype: the context type
10331 * @data: where to store the context data
10333 * Issues a FW command through the given mailbox to read an SGE context.
10335 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10336 enum ctxt_type ctype, u32 *data)
10338 struct fw_ldst_cmd c;
10341 if (ctype == CTXT_FLM)
10342 ret = FW_LDST_ADDRSPC_SGE_FLMC;
10344 ret = FW_LDST_ADDRSPC_SGE_CONMC;
10346 memset(&c, 0, sizeof(c));
10347 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10348 FW_CMD_REQUEST_F | FW_CMD_READ_F |
10349 FW_LDST_CMD_ADDRSPACE_V(ret));
10350 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10351 c.u.idctxt.physid = cpu_to_be32(cid);
10353 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10355 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10356 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10357 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10358 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10359 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10360 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10366 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10367 * @adap: the adapter
10368 * @cid: the context id
10369 * @ctype: the context type
10370 * @data: where to store the context data
10372 * Reads an SGE context directly, bypassing FW. This is only for
10373 * debugging when FW is unavailable.
10375 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
10376 enum ctxt_type ctype, u32 *data)
10380 t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
10381 ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
10383 for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
10384 *data++ = t4_read_reg(adap, i);
10388 int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
10389 u8 rateunit, u8 ratemode, u8 channel, u8 class,
10390 u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
10393 struct fw_sched_cmd cmd;
10395 memset(&cmd, 0, sizeof(cmd));
10396 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
10399 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10401 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10402 cmd.u.params.type = type;
10403 cmd.u.params.level = level;
10404 cmd.u.params.mode = mode;
10405 cmd.u.params.ch = channel;
10406 cmd.u.params.cl = class;
10407 cmd.u.params.unit = rateunit;
10408 cmd.u.params.rate = ratemode;
10409 cmd.u.params.min = cpu_to_be32(minrate);
10410 cmd.u.params.max = cpu_to_be32(maxrate);
10411 cmd.u.params.weight = cpu_to_be16(weight);
10412 cmd.u.params.pktsize = cpu_to_be16(pktsize);
10413 cmd.u.params.burstsize = cpu_to_be16(burstsize);
10415 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
10420 * t4_i2c_rd - read I2C data from adapter
10421 * @adap: the adapter
10422 * @mbox: mailbox to use for the FW command
10423 * @port: Port number if per-port device; <0 if not
10424 * @devid: per-port device ID or absolute device ID
10425 * @offset: byte offset into device I2C space
10426 * @len: byte length of I2C space data
10427 * @buf: buffer in which to return I2C data
10429 * Reads the I2C data from the indicated device and location.
10431 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
10432 unsigned int devid, unsigned int offset,
10433 unsigned int len, u8 *buf)
10435 struct fw_ldst_cmd ldst_cmd, ldst_rpl;
10436 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
10439 if (len > I2C_PAGE_SIZE)
10442 /* Dont allow reads that spans multiple pages */
10443 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
10446 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10447 ldst_cmd.op_to_addrspace =
10448 cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10451 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
10452 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
10453 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
10454 ldst_cmd.u.i2c.did = devid;
10457 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
10459 ldst_cmd.u.i2c.boffset = offset;
10460 ldst_cmd.u.i2c.blen = i2c_len;
10462 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
10467 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
10477 * t4_set_vlan_acl - Set a VLAN id for the specified VF
10478 * @adap: the adapter
10479 * @mbox: mailbox to use for the FW command
10480 * @vf: one of the VFs instantiated by the specified PF
10481 * @vlan: The vlanid to be set
10483 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
10486 struct fw_acl_vlan_cmd vlan_cmd;
10487 unsigned int enable;
10489 enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
10490 memset(&vlan_cmd, 0, sizeof(vlan_cmd));
10491 vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
10495 FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
10496 FW_ACL_VLAN_CMD_VFN_V(vf));
10497 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
10498 /* Drop all packets that donot match vlan id */
10499 vlan_cmd.dropnovlan_fm = (enable
10500 ? (FW_ACL_VLAN_CMD_DROPNOVLAN_F |
10501 FW_ACL_VLAN_CMD_FM_F) : 0);
10503 vlan_cmd.nvlan = 1;
10504 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
10507 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
10511 * modify_device_id - Modifies the device ID of the Boot BIOS image
10512 * @device_id: the device ID to write.
10513 * @boot_data: the boot image to modify.
10515 * Write the supplied device ID to the boot BIOS image.
10517 static void modify_device_id(int device_id, u8 *boot_data)
10519 struct cxgb4_pcir_data *pcir_header;
10520 struct legacy_pci_rom_hdr *header;
10521 u8 *cur_header = boot_data;
10524 /* Loop through all chained images and change the device ID's */
10526 header = (struct legacy_pci_rom_hdr *)cur_header;
10527 pcir_offset = le16_to_cpu(header->pcir_offset);
10528 pcir_header = (struct cxgb4_pcir_data *)(cur_header +
10532 * Only modify the Device ID if code type is Legacy or HP.
10533 * 0x00: Okay to modify
10534 * 0x01: FCODE. Do not modify
10535 * 0x03: Okay to modify
10536 * 0x04-0xFF: Do not modify
10538 if (pcir_header->code_type == CXGB4_HDR_CODE1) {
10543 * Modify Device ID to match current adatper
10545 pcir_header->device_id = cpu_to_le16(device_id);
10548 * Set checksum temporarily to 0.
10549 * We will recalculate it later.
10551 header->cksum = 0x0;
10554 * Calculate and update checksum
10556 for (i = 0; i < (header->size512 * 512); i++)
10557 csum += cur_header[i];
10560 * Invert summed value to create the checksum
10561 * Writing new checksum value directly to the boot data
10563 cur_header[7] = -csum;
10565 } else if (pcir_header->code_type == CXGB4_HDR_CODE2) {
10567 * Modify Device ID to match current adatper
10569 pcir_header->device_id = cpu_to_le16(device_id);
10573 * Move header pointer up to the next image in the ROM.
10575 cur_header += header->size512 * 512;
10576 } while (!(pcir_header->indicator & CXGB4_HDR_INDI));
10580 * t4_load_boot - download boot flash
10581 * @adap: the adapter
10582 * @boot_data: the boot image to write
10583 * @boot_addr: offset in flash to write boot_data
10584 * @size: image size
10586 * Write the supplied boot image to the card's serial flash.
10587 * The boot image has the following sections: a 28-byte header and the
10590 int t4_load_boot(struct adapter *adap, u8 *boot_data,
10591 unsigned int boot_addr, unsigned int size)
10593 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10594 unsigned int boot_sector = (boot_addr * 1024);
10595 struct cxgb4_pci_exp_rom_header *header;
10596 struct cxgb4_pcir_data *pcir_header;
10603 * Make sure the boot image does not encroach on the firmware region
10605 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10606 dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n");
10610 /* Get boot header */
10611 header = (struct cxgb4_pci_exp_rom_header *)boot_data;
10612 pcir_offset = le16_to_cpu(header->pcir_offset);
10613 /* PCIR Data Structure */
10614 pcir_header = (struct cxgb4_pcir_data *)&boot_data[pcir_offset];
10617 * Perform some primitive sanity testing to avoid accidentally
10618 * writing garbage over the boot sectors. We ought to check for
10619 * more but it's not worth it for now ...
10621 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10622 dev_err(adap->pdev_dev, "boot image too small/large\n");
10626 if (le16_to_cpu(header->signature) != BOOT_SIGNATURE) {
10627 dev_err(adap->pdev_dev, "Boot image missing signature\n");
10631 /* Check PCI header signature */
10632 if (le32_to_cpu(pcir_header->signature) != PCIR_SIGNATURE) {
10633 dev_err(adap->pdev_dev, "PCI header missing signature\n");
10637 /* Check Vendor ID matches Chelsio ID*/
10638 if (le16_to_cpu(pcir_header->vendor_id) != PCI_VENDOR_ID_CHELSIO) {
10639 dev_err(adap->pdev_dev, "Vendor ID missing signature\n");
10644 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10645 * and Boot configuration data sections. These 3 boot sections span
10646 * sectors 0 to 7 in flash and live right before the FW image location.
10648 i = DIV_ROUND_UP(size ? size : FLASH_FW_START, sf_sec_size);
10649 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10650 (boot_sector >> 16) + i - 1);
10653 * If size == 0 then we're simply erasing the FLASH sectors associated
10654 * with the on-adapter option ROM file
10656 if (ret || size == 0)
10658 /* Retrieve adapter's device ID */
10659 pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id);
10660 /* Want to deal with PF 0 so I strip off PF 4 indicator */
10661 device_id = device_id & 0xf0ff;
10663 /* Check PCIE Device ID */
10664 if (le16_to_cpu(pcir_header->device_id) != device_id) {
10666 * Change the device ID in the Boot BIOS image to match
10667 * the Device ID of the current adapter.
10669 modify_device_id(device_id, boot_data);
10673 * Skip over the first SF_PAGE_SIZE worth of data and write it after
10674 * we finish copying the rest of the boot image. This will ensure
10675 * that the BIOS boot header will only be written if the boot image
10676 * was written in full.
10678 addr = boot_sector;
10679 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10680 addr += SF_PAGE_SIZE;
10681 boot_data += SF_PAGE_SIZE;
10682 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
10688 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10689 (const u8 *)header, false);
10693 dev_err(adap->pdev_dev, "boot image load failed, error %d\n",
10699 * t4_flash_bootcfg_addr - return the address of the flash
10700 * optionrom configuration
10701 * @adapter: the adapter
10703 * Return the address within the flash where the OptionROM Configuration
10704 * is stored, or an error if the device FLASH is too small to contain
10705 * a OptionROM Configuration.
10707 static int t4_flash_bootcfg_addr(struct adapter *adapter)
10710 * If the device FLASH isn't large enough to hold a Firmware
10711 * Configuration File, return an error.
10713 if (adapter->params.sf_size <
10714 FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10717 return FLASH_BOOTCFG_START;
10720 int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10722 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10723 struct cxgb4_bootcfg_data *header;
10724 unsigned int flash_cfg_start_sec;
10725 unsigned int addr, npad;
10726 int ret, i, n, cfg_addr;
10728 cfg_addr = t4_flash_bootcfg_addr(adap);
10733 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10735 if (size > FLASH_BOOTCFG_MAX_SIZE) {
10736 dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n",
10737 FLASH_BOOTCFG_MAX_SIZE);
10741 header = (struct cxgb4_bootcfg_data *)cfg_data;
10742 if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) {
10743 dev_err(adap->pdev_dev, "Wrong bootcfg signature\n");
10748 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,
10750 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10751 flash_cfg_start_sec + i - 1);
10754 * If size == 0 then we're simply erasing the FLASH sectors associated
10755 * with the on-adapter OptionROM Configuration File.
10757 if (ret || size == 0)
10760 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10761 for (i = 0; i < size; i += SF_PAGE_SIZE) {
10762 n = min_t(u32, size - i, SF_PAGE_SIZE);
10764 ret = t4_write_flash(adap, addr, n, cfg_data, false);
10768 addr += SF_PAGE_SIZE;
10769 cfg_data += SF_PAGE_SIZE;
10772 npad = ((size + 4 - 1) & ~3) - size;
10773 for (i = 0; i < npad; i++) {
10776 ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
10784 dev_err(adap->pdev_dev, "boot config data %s failed %d\n",
10785 (size == 0 ? "clear" : "download"), ret);