2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/delay.h>
38 #include "t4_values.h"
40 #include "t4fw_version.h"
43 * t4_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time
52 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise.
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 int polarity, int attempts, int delay, u32 *valp)
/* Sample @reg; the single-bit @mask field compared against @polarity
 * decides completion.  NOTE(review): the retry/delay portion of the
 * polling loop is not visible in this excerpt — confirm against full file.
 */
61 u32 val = t4_read_reg(adapter, reg);
63 if (!!(val & mask) == polarity) {
/* Convenience wrapper around t4_wait_op_done_val() for callers that do not
 * need the final register value (the value pointer argument — presumably
 * NULL — is on a line not visible here; confirm).
 */
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 int polarity, int attempts, int delay)
78 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
83 * t4_set_reg_field - set a register field to a value
84 * @adapter: the adapter to program
85 * @addr: the register address
86 * @mask: specifies the portion of the register to modify
87 * @val: the new value for the register field
89 * Sets a register field specified by the supplied mask to the
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* Read-modify-write: clear the @mask portion, then OR in the new value. */
95 u32 v = t4_read_reg(adapter, addr) & ~mask;
97 t4_write_reg(adapter, addr, v | val);
98 (void) t4_read_reg(adapter, addr); /* flush */
102 * t4_read_indirect - read indirectly addressed registers
104 * @addr_reg: register holding the indirect address
105 * @data_reg: register holding the value of the indirect register
106 * @vals: where the read register values are stored
107 * @nregs: how many indirect registers to read
108 * @start_idx: index of first indirect register to read
110 * Reads registers that are accessed indirectly through an address/data
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 unsigned int data_reg, u32 *vals,
115 unsigned int nregs, unsigned int start_idx)
/* Per register: latch the index into @addr_reg, then read the value back
 * through @data_reg.  NOTE(review): the loop header iterating @nregs times
 * is not visible in this excerpt — confirm start_idx is advanced there.
 */
118 t4_write_reg(adap, addr_reg, start_idx);
119 *vals++ = t4_read_reg(adap, data_reg);
125 * t4_write_indirect - write indirectly addressed registers
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
/* Per register: latch the (post-incremented) index, then write its value.
 * NOTE(review): loop header over @nregs not visible in this excerpt.
 */
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism. This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses.
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
/* Build the backdoor request: our PF number plus the target config-space
 * register offset.
 */
154 u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
/* NOTE(review): the bodies of the chip-revision conditionals below (extra
 * request bits for <= T5 and for T4) are on lines not visible in this
 * excerpt — confirm against the full source before modifying.
 */
156 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
161 if (is_t4(adap->params.chip))
164 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
167 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168 * Configuration Space read. (None of the other fields matter when
169 * ENABLE is 0 so a simple register write is easier than a
170 * read-modify-write via t4_set_reg_field().)
172 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
176 * t4_report_fw_error - report firmware error
179 * The adapter firmware can indicate error conditions to the host.
180 * If the firmware has indicated an error, print out the reason for
181 * the firmware error.
183 static void t4_report_fw_error(struct adapter *adap)
/* Human-readable reasons, indexed by the PCIE_FW_EVAL field of PCIE_FW. */
185 static const char *const reason[] = {
186 "Crash", /* PCIE_FW_EVAL_CRASH */
187 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
188 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
189 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
190 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
192 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193 "Reserved", /* reserved */
197 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198 if (pcie_fw & PCIE_FW_ERR_F) {
199 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200 reason[PCIE_FW_EVAL_G(pcie_fw)]);
/* Firmware is in an error state: mark it not-OK for the rest of the driver. */
201 adap->flags &= ~CXGB4_FW_OK;
206 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
208 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
/* Copy @nflit 64-bit flits from the mailbox at @mbox_addr into @rpl,
 * converting each to big-endian storage order.
 */
211 for ( ; nflit; nflit--, mbox_addr += 8)
212 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
216 * Handle a FW assertion reported in a mailbox.
218 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
220 struct fw_debug_cmd asrt;
/* Pull the FW_DEBUG_CMD payload out of the mailbox and log the assertion's
 * source location and the two diagnostic values it carries.
 */
222 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
223 dev_alert(adap->pdev_dev,
224 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
225 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
226 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
230 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
231 * @adapter: the adapter
232 * @cmd: the Firmware Mailbox Command or Reply
233 * @size: command length in bytes
234 * @access: the time (ms) needed to access the Firmware Mailbox
235 * @execute: the time (ms) the command spent being executed
237 static void t4_record_mbox(struct adapter *adapter,
238 const __be64 *cmd, unsigned int size,
239 int access, int execute)
241 struct mbox_cmd_log *log = adapter->mbox_log;
242 struct mbox_cmd *entry;
/* Claim the next slot in the circular log.  NOTE(review): the cursor-wrap
 * reset and the zero-padding body of the while loop below are on lines not
 * visible in this excerpt.
 */
245 entry = mbox_cmd_log_entry(log, log->cursor++);
246 if (log->cursor == log->size)
249 for (i = 0; i < size / 8; i++)
250 entry->cmd[i] = be64_to_cpu(cmd[i]);
251 while (i < MBOX_LEN / 8)
253 entry->timestamp = jiffies;
254 entry->seqno = log->seqno++;
255 entry->access = access;
256 entry->execute = execute;
260 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
262 * @mbox: index of the mailbox to use
263 * @cmd: the command to write
264 * @size: command length in bytes
265 * @rpl: where to optionally store the reply
266 * @sleep_ok: if true we may sleep while awaiting command completion
267 * @timeout: time to wait for command to finish before timing out
269 * Sends the given command to FW through the selected mailbox and waits
270 * for the FW to execute the command. If @rpl is not %NULL it is used to
271 * store the FW's reply to the command. The command and its optional
272 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
273 * to respond. @sleep_ok determines whether we may sleep while awaiting
274 * the response. If sleeping is allowed we use progressive backoff
277 * The return value is 0 on success or a negative errno on failure. A
278 * failure can happen either because we are not able to execute the
279 * command or FW executes it but signals an error. In the latter case
280 * the return value is the error code indicated by FW (negated).
282 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
283 int size, void *rpl, bool sleep_ok, int timeout)
/* Progressive backoff schedule (ms); the last entry repeats once reached. */
285 static const int delay[] = {
286 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
289 struct mbox_list entry;
294 int i, ms, delay_idx, ret;
295 const __be64 *p = cmd;
296 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
297 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
298 __be64 cmd_rpl[MBOX_LEN / 8];
/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
301 if ((size & 15) || size > MBOX_LEN)
305 * If the device is off-line, as in EEH, commands will time out.
306 * Fail them early so we don't waste time waiting.
308 if (adap->pdev->error_state != pci_channel_io_normal)
311 /* If we have a negative timeout, that implies that we can't sleep. */
317 /* Queue ourselves onto the mailbox access list. When our entry is at
318 * the front of the list, we have rights to access the mailbox. So we
319 * wait [for a while] till we're at the front [or bail out with an
322 spin_lock_bh(&adap->mbox_lock);
323 list_add_tail(&entry.list, &adap->mlist.list);
324 spin_unlock_bh(&adap->mbox_lock);
329 for (i = 0; ; i += ms) {
330 /* If we've waited too long, return a busy indication. This
331 * really ought to be based on our initial position in the
332 * mailbox access list but this is a start. We very rarely
333 * contend on access to the mailbox ...
335 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
336 if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
337 spin_lock_bh(&adap->mbox_lock);
338 list_del(&entry.list);
339 spin_unlock_bh(&adap->mbox_lock);
/* Firmware error trumps contention: report -ENXIO rather than -EBUSY. */
340 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
341 t4_record_mbox(adap, cmd, size, access, ret);
345 /* If we're at the head, break out and start the mailbox
348 if (list_first_entry(&adap->mlist.list, struct mbox_list,
352 /* Delay for a bit before checking again ... */
354 ms = delay[delay_idx]; /* last element may repeat */
355 if (delay_idx < ARRAY_SIZE(delay) - 1)
363 /* Loop trying to get ownership of the mailbox. Return an error
364 * if we can't gain ownership.
366 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
367 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
368 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
369 if (v != MBOX_OWNER_DRV) {
370 spin_lock_bh(&adap->mbox_lock);
371 list_del(&entry.list);
372 spin_unlock_bh(&adap->mbox_lock);
373 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
374 t4_record_mbox(adap, cmd, size, access, ret);
378 /* Copy in the new mailbox command and send it on its way ... */
379 t4_record_mbox(adap, cmd, size, access, 0);
380 for (i = 0; i < size; i += 8)
381 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
/* Hand ownership to the firmware and flush the posted write. */
383 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
384 t4_read_reg(adap, ctl_reg); /* flush write */
390 !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
394 ms = delay[delay_idx]; /* last element may repeat */
395 if (delay_idx < ARRAY_SIZE(delay) - 1)
401 v = t4_read_reg(adap, ctl_reg);
402 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
/* Ownership returned without a valid message: clear and keep waiting. */
403 if (!(v & MBMSGVALID_F)) {
404 t4_write_reg(adap, ctl_reg, 0);
408 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
409 res = be64_to_cpu(cmd_rpl[0]);
/* A FW_DEBUG_CMD reply is an in-band firmware assertion, not our reply. */
411 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
412 fw_asrt(adap, data_reg);
413 res = FW_CMD_RETVAL_V(EIO);
415 memcpy(rpl, cmd_rpl, size);
418 t4_write_reg(adap, ctl_reg, 0);
421 t4_record_mbox(adap, cmd_rpl,
422 MBOX_LEN, access, execute);
423 spin_lock_bh(&adap->mbox_lock);
424 list_del(&entry.list);
425 spin_unlock_bh(&adap->mbox_lock);
/* Success path: negated firmware retval (0 on FW success). */
426 return -FW_CMD_RETVAL_G((int)res);
/* Timed out waiting for the firmware's reply. */
430 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
431 t4_record_mbox(adap, cmd, size, access, ret);
432 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
433 *(const u8 *)cmd, mbox);
434 t4_report_fw_error(adap);
435 spin_lock_bh(&adap->mbox_lock);
436 list_del(&entry.list);
437 spin_unlock_bh(&adap->mbox_lock);
/* Wrapper around t4_wr_mbox_meat_timeout() using the default timeout
 * (the timeout argument is on a line not visible here — presumably
 * FW_CMD_MAX_TIMEOUT; confirm against the full source).
 */
442 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
443 void *rpl, bool sleep_ok)
445 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
/* Dump EDC ECC error address and BIST status data for EDC controller @idx.
 * T5+ only; @idx must be 0 or 1.
 */
449 static int t4_edc_err_read(struct adapter *adap, int idx)
451 u32 edc_ecc_err_addr_reg;
454 if (is_t4(adap->params.chip)) {
455 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
458 if (idx != 0 && idx != 1) {
459 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
463 edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
464 rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
467 "edc%d err addr 0x%x: 0x%x.\n",
468 idx, edc_ecc_err_addr_reg,
469 t4_read_reg(adap, edc_ecc_err_addr_reg));
471 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
/* Nine consecutive 64-bit BIST status words, 8 bytes apart. */
473 (unsigned long long)t4_read_reg64(adap, rdata_reg),
474 (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
475 (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
476 (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
477 (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
478 (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
479 (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
480 (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
481 (unsigned long long)t4_read_reg64(adap, rdata_reg + 64),
487 * t4_memory_rw_init - Get memory window relative offset, base, and size.
489 * @win: PCI-E Memory Window to use
490 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
491 * @mem_off: memory relative offset with respect to @mtype.
492 * @mem_base: configured memory base address.
493 * @mem_aperture: configured memory window aperture.
495 * Get the configured memory window's relative offset, base, and size.
497 int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
498 u32 *mem_base, u32 *mem_aperture)
500 u32 edc_size, mc_size, mem_reg;
502 /* Offset into the region of memory which is being accessed
505 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
506 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
509 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
/* HMA sits immediately after the two EDC regions.  The EDC sizes read
 * from hardware are in MB, hence the *1024*1024 scaling throughout.
 */
510 if (mtype == MEM_HMA) {
511 *mem_off = 2 * (edc_size * 1024 * 1024);
512 } else if (mtype != MEM_MC1) {
513 *mem_off = (mtype * (edc_size * 1024 * 1024));
515 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
516 MA_EXT_MEMORY0_BAR_A));
517 *mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
520 /* Each PCI-E Memory Window is programmed with a window size -- or
521 * "aperture" -- which controls the granularity of its mapping onto
522 * adapter memory. We need to grab that aperture in order to know
523 * how to use the specified window. The window is also programmed
524 * with the base address of the Memory Window in BAR0's address
525 * space. For T4 this is an absolute PCI-E Bus Address. For T5
526 * the address is relative to BAR0.
528 mem_reg = t4_read_reg(adap,
529 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
531 /* a dead adapter will return 0xffffffff for PIO reads */
532 if (mem_reg == 0xffffffff)
535 *mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
536 *mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
/* T4 programs an absolute bus address; normalize to a BAR0-relative one. */
537 if (is_t4(adap->params.chip))
538 *mem_base -= adap->t4_bar0;
544 * t4_memory_update_win - Move memory window to specified address.
546 * @win: PCI-E Memory Window to use
547 * @addr: location to move.
549 * Move memory window to specified address.
551 void t4_memory_update_win(struct adapter *adap, int win, u32 addr)
/* Program the window's OFFSET register to point at @addr. */
554 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
556 /* Read it back to ensure that changes propagate before we
557 * attempt to use the new value.
560 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
564 * t4_memory_rw_residual - Read/Write residual data.
566 * @off: relative offset within residual to start read/write.
567 * @addr: address within indicated memory type.
568 * @buf: host memory buffer
569 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
571 * Read/Write residual data less than 32-bits.
573 void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
/* Transfer the final 1-3 bytes through a word/byte union ("last") so the
 * 32-bit register access only touches the bytes from @off onward.
 * NOTE(review): the union declaration and write-side setup are on lines
 * not visible in this excerpt.
 */
583 if (dir == T4_MEMORY_READ) {
584 last.word = le32_to_cpu((__force __le32)
585 t4_read_reg(adap, addr));
586 for (bp = (unsigned char *)buf, i = off; i < 4; i++)
587 bp[i] = last.byte[i];
590 for (i = off; i < 4; i++)
592 t4_write_reg(adap, addr,
593 (__force u32)cpu_to_le32(last.word));
598 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
600 * @win: PCI-E Memory Window to use
601 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
602 * @addr: address within indicated memory type
603 * @len: amount of memory to transfer
604 * @hbuf: host memory buffer
605 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
607 * Reads/writes an [almost] arbitrary memory region in the firmware: the
608 * firmware memory address and host buffer must be aligned on 32-bit
609 * boundaries; the length may be arbitrary. The memory is transferred as
610 * a raw byte sequence from/to the firmware's memory. If this memory
611 * contains data structures which contain multi-byte integers, it's the
612 * caller's responsibility to perform appropriate byte order conversions.
614 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
615 u32 len, void *hbuf, int dir)
617 u32 pos, offset, resid, memoffset;
618 u32 win_pf, mem_aperture, mem_base;
622 /* Argument sanity checks ...
/* Both the adapter address and the host buffer must be 32-bit aligned. */
624 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
628 /* It's convenient to be able to handle lengths which aren't a
629 * multiple of 32-bits because we often end up transferring files to
630 * the firmware. So we'll handle that by normalizing the length here
631 * and then handling any residual transfer at the end.
636 ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
641 /* Determine the PCIE_MEM_ACCESS_OFFSET */
642 addr = addr + memoffset;
644 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
646 /* Calculate our initial PCI-E Memory Window Position and Offset into
/* Align the window position down to an aperture boundary. */
649 pos = addr & ~(mem_aperture - 1);
652 /* Set up initial PCI-E Memory Window to cover the start of our
655 t4_memory_update_win(adap, win, pos | win_pf);
657 /* Transfer data to/from the adapter as long as there's an integral
658 * number of 32-bit transfers to complete.
660 * A note on Endianness issues:
662 * The "register" reads and writes below from/to the PCI-E Memory
663 * Window invoke the standard adapter Big-Endian to PCI-E Link
664 * Little-Endian "swizzel." As a result, if we have the following
665 * data in adapter memory:
667 * Memory: ... | b0 | b1 | b2 | b3 | ...
668 * Address: i+0 i+1 i+2 i+3
670 * Then a read of the adapter memory via the PCI-E Memory Window
675 * [ b3 | b2 | b1 | b0 ]
677 * If this value is stored into local memory on a Little-Endian system
678 * it will show up correctly in local memory as:
680 * ( ..., b0, b1, b2, b3, ... )
682 * But on a Big-Endian system, the store will show up in memory
683 * incorrectly swizzled as:
685 * ( ..., b3, b2, b1, b0, ... )
687 * So we need to account for this in the reads and writes to the
688 * PCI-E Memory Window below by undoing the register read/write
692 if (dir == T4_MEMORY_READ)
693 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
696 t4_write_reg(adap, mem_base + offset,
697 (__force u32)cpu_to_le32(*buf++));
698 offset += sizeof(__be32);
699 len -= sizeof(__be32);
701 /* If we've reached the end of our current window aperture,
702 * move the PCI-E Memory Window on to the next. Note that
703 * doing this here after "len" may be 0 allows us to set up
704 * the PCI-E Memory Window for a possible final residual
707 if (offset == mem_aperture) {
710 t4_memory_update_win(adap, win, pos | win_pf);
714 /* If the original transfer had a length which wasn't a multiple of
715 * 32-bits, now's where we need to finish off the transfer of the
716 * residual amount. The PCI-E Memory Window has already been moved
717 * above (if necessary) to cover this final transfer.
720 t4_memory_rw_residual(adap, resid, mem_base + offset,
726 /* Return the specified PCI-E Configuration Space register from our Physical
727 * Function. We try first via a Firmware LDST Command since we prefer to let
728 * the firmware own all of these registers, but if that fails we go for it
729 * directly ourselves.
731 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
733 u32 val, ldst_addrspace;
735 /* If fw_attach != 0, construct and send the Firmware LDST Command to
736 * retrieve the specified PCI-E Configuration Space register.
738 struct fw_ldst_cmd ldst_cmd;
741 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
742 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
743 ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
747 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
/* Single access of register @reg on our own PF. */
748 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
749 ldst_cmd.u.pcie.ctrl_to_fn =
750 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
751 ldst_cmd.u.pcie.r = reg;
753 /* If the LDST Command succeeds, return the result, otherwise
754 * fall through to reading it directly ourselves ...
756 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
759 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
761 /* Read the desired Configuration Space register via the PCI-E
762 * Backdoor mechanism.
764 t4_hw_pci_read_cfg4(adap, reg, &val);
768 /* Get the window based on base passed to it.
769 * Window aperture is currently unhandled, but there is no use case for it
772 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
777 if (is_t4(adap->params.chip)) {
780 /* Truncation intentional: we only read the bottom 32-bits of
781 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
782 * mechanism to read BAR0 instead of using
783 * pci_resource_start() because we could be operating from
784 * within a Virtual Machine which is trapping our accesses to
785 * our Configuration Space and we need to set up the PCI-E
786 * Memory Window decoders with the actual addresses which will
787 * be coming across the PCI-E link.
789 bar0 = t4_read_pcie_cfg4(adap, pci_base);
/* Remember BAR0 so t4_memory_rw_init() can normalize T4 window bases. */
791 adap->t4_bar0 = bar0;
793 ret = bar0 + memwin_base;
795 /* For T5, only relative offset inside the PCIe BAR is passed */
801 /* Get the default utility window (win0) used by everyone */
802 u32 t4_get_util_window(struct adapter *adap)
/* Window 0 at MEMWIN0_BASE within BAR0 is the shared utility window. */
804 return t4_get_window(adap, PCI_BASE_ADDRESS_0,
805 PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
808 /* Set up memory window for accessing adapter memory ranges. (Read
809 * back MA register to ensure that changes propagate before we attempt
810 * to use the new values.)
812 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
/* Program base, BIR 0, and the log2 aperture size for @window, then read
 * the register back so the write posts before first use.
 */
815 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
816 memwin_base | BIR_V(0) |
817 WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
819 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
823 * t4_get_regs_len - return the size of the chips register set
824 * @adapter: the adapter
826 * Returns the size of the chip's BAR0 register space.
828 unsigned int t4_get_regs_len(struct adapter *adapter)
830 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
/* NOTE(review): the case labels for this switch are on lines not visible
 * in this excerpt — the T5 map size presumably also covers T6; confirm.
 */
832 switch (chip_version) {
834 return T4_REGMAP_SIZE;
838 return T5_REGMAP_SIZE;
841 dev_err(adapter->pdev_dev,
842 "Unsupported chip version %d\n", chip_version);
847 * t4_get_regs - read chip registers into provided buffer
849 * @buf: register buffer
850 * @buf_size: size (in bytes) of register buffer
852 * If the provided register buffer isn't large enough for the chip's
853 * full register range, the register dump will be truncated to the
854 * register buffer's size.
856 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
/* The per-chip tables are flat arrays of {first_reg, last_reg} pairs;
 * their (very long) contents are elided in this excerpt.
 */
858 static const unsigned int t4_reg_ranges[] = {
1317 static const unsigned int t5_reg_ranges[] = {
2081 static const unsigned int t6_reg_ranges[] = {
2640 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2641 const unsigned int *reg_ranges;
2642 int reg_ranges_size, range;
2643 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2645 /* Select the right set of register ranges to dump depending on the
2646 * adapter chip type.
2648 switch (chip_version) {
2650 reg_ranges = t4_reg_ranges;
2651 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2655 reg_ranges = t5_reg_ranges;
2656 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2660 reg_ranges = t6_reg_ranges;
2661 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2665 dev_err(adap->pdev_dev,
2666 "Unsupported chip version %d\n", chip_version);
2670 /* Clear the register buffer and insert the appropriate register
2671 * values selected by the above register ranges.
2673 memset(buf, 0, buf_size);
2674 for (range = 0; range < reg_ranges_size; range += 2) {
2675 unsigned int reg = reg_ranges[range];
2676 unsigned int last_reg = reg_ranges[range + 1];
/* Each register's value lands at its own BAR0 offset within @buf. */
2677 u32 *bufp = (u32 *)((char *)buf + reg);
2679 /* Iterate across the register range filling in the register
2680 * buffer but don't write past the end of the register buffer.
2682 while (reg <= last_reg && bufp < buf_end) {
2683 *bufp++ = t4_read_reg(adap, reg);
/* Serial EEPROM / VPD layout constants. */
2689 #define EEPROM_STAT_ADDR 0x7bfc
2690 #define VPD_BASE 0x400
2691 #define VPD_BASE_OLD 0
2692 #define VPD_LEN 1024
2695 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2696 * @phys_addr: the physical EEPROM address
2697 * @fn: the PCI function number
2698 * @sz: size of function-specific area
2700 * Translate a physical EEPROM address to virtual. The first 1K is
2701 * accessed through virtual addresses starting at 31K, the rest is
2702 * accessed through virtual addresses starting at 0.
2704 * The mapping is as follows:
2705 * [0..1K) -> [31K..32K)
2706 * [1K..1K+A) -> [31K-A..31K)
2707 * [1K+A..ES) -> [0..ES-A-1K)
2709 * where A = @fn * @sz, and ES = EEPROM size.
2711 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
/* NOTE(review): the line computing fn *= sz (so "fn" below is the byte
 * offset @fn * @sz from the kernel-doc mapping) is not visible in this
 * excerpt — confirm before modifying the arithmetic.
 */
2714 if (phys_addr < 1024)
2715 return phys_addr + (31 << 10);
2716 if (phys_addr < 1024 + fn)
2717 return 31744 - fn + phys_addr - 1024;
2718 if (phys_addr < EEPROMSIZE)
2719 return phys_addr - 1024 - fn;
2724 * t4_seeprom_wp - enable/disable EEPROM write protection
2725 * @adapter: the adapter
2726 * @enable: whether to enable or disable write protection
2728 * Enables or disables write protection on the serial EEPROM.
2730 int t4_seeprom_wp(struct adapter *adapter, bool enable)
/* 0xc sets the write-protect bits in the EEPROM status word; 0 clears them. */
2732 unsigned int v = enable ? 0xc : 0;
2733 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2734 return ret < 0 ? ret : 0;
2738 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2739 * @adapter: adapter to read
2740 * @p: where to store the parameters
2742 * Reads card parameters stored in VPD EEPROM.
2744 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2746 int i, ret = 0, addr;
2748 u8 *vpd, csum, base_val = 0;
2749 unsigned int vpdr_len, kw_offset, id_len;
2751 vpd = vmalloc(VPD_LEN);
2755 /* Card information normally starts at VPD_BASE but early cards had
/* Probe one byte at VPD_BASE to decide between the new and old layouts. */
2758 ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
2762 addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : VPD_BASE_OLD;
2764 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2768 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2769 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
2774 id_len = pci_vpd_lrdt_size(vpd);
2775 if (id_len > ID_LEN)
2778 i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2780 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2785 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2786 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2787 if (vpdr_len + kw_offset > VPD_LEN) {
2788 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
2793 #define FIND_VPD_KW(var, name) do { \
2794 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2796 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2800 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
/* The "RV" keyword carries a checksum byte; summing everything up to and
 * including it must yield zero.
 */
2803 FIND_VPD_KW(i, "RV");
2804 for (csum = 0; i >= 0; i--)
2808 dev_err(adapter->pdev_dev,
2809 "corrupted VPD EEPROM, actual csum %u\n", csum);
2814 FIND_VPD_KW(ec, "EC");
2815 FIND_VPD_KW(sn, "SN");
2816 FIND_VPD_KW(pn, "PN");
2817 FIND_VPD_KW(na, "NA");
2820 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2822 memcpy(p->ec, vpd + ec, EC_LEN);
2824 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2825 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2827 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2828 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2830 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2831 strim((char *)p->na);
2835 return ret < 0 ? ret : 0;
2839 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2840 * @adapter: adapter to read
2841 * @p: where to store the parameters
2843 * Reads card parameters stored in VPD EEPROM and retrieves the Core
2844 * Clock. This can only be called after a connection to the firmware
2847 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2849 u32 cclk_param, cclk_val;
2852 /* Grab the raw VPD parameters.
2854 ret = t4_get_raw_vpd_params(adapter, p);
2858 /* Ask firmware for the Core Clock since it knows how to translate the
2859 * Reference Clock ('V2') VPD field into a Core Clock value ...
2861 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2862 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2863 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2864 1, &cclk_param, &cclk_val);
2874 * t4_get_pfres - retrieve VF resource limits
2875 * @adapter: the adapter
2877 * Retrieves configured resource limits and capabilities for a physical
2878 * function. The results are stored in @adapter->pfres.
2880 int t4_get_pfres(struct adapter *adapter)
2882 struct pf_resources *pfres = &adapter->params.pfres;
2883 struct fw_pfvf_cmd cmd, rpl;
2887 /* Execute PFVF Read command to get VF resource limits; bail out early
2888 * with error on command failure.
2890 memset(&cmd, 0, sizeof(cmd));
2891 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
2894 FW_PFVF_CMD_PFN_V(adapter->pf) |
/* VFN 0 means "query the PF's own limits", not a particular VF. */
2895 FW_PFVF_CMD_VFN_V(0));
2896 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2897 v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
2898 if (v != FW_SUCCESS)
2901 /* Extract PF resource limits and return success.
/* Each reply word packs several bit-fields; unpack via the _G() macros. */
2903 word = be32_to_cpu(rpl.niqflint_niq);
2904 pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
2905 pfres->niq = FW_PFVF_CMD_NIQ_G(word);
2907 word = be32_to_cpu(rpl.type_to_neq);
2908 pfres->neq = FW_PFVF_CMD_NEQ_G(word);
2909 pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
2911 word = be32_to_cpu(rpl.tc_to_nexactf);
2912 pfres->tc = FW_PFVF_CMD_TC_G(word);
2913 pfres->nvi = FW_PFVF_CMD_NVI_G(word);
2914 pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
2916 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
2917 pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
2918 pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
2919 pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
2924 /* serial flash and firmware constants */
2926 SF_ATTEMPTS = 10, /* max retries for SF operations */
2928 /* flash command opcodes */
/* These opcode values match the common SPI NOR flash command set —
 * presumably JEDEC-standard; confirm against the flash part's datasheet.
 */
2929 SF_PROG_PAGE = 2, /* program page */
2930 SF_WR_DISABLE = 4, /* disable writes */
2931 SF_RD_STATUS = 5, /* read status register */
2932 SF_WR_ENABLE = 6, /* enable writes */
2933 SF_RD_DATA_FAST = 0xb, /* read flash */
2934 SF_RD_ID = 0x9f, /* read ID */
2935 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2939 * sf1_read - read data from the serial flash
2940 * @adapter: the adapter
2941 * @byte_cnt: number of bytes to read
2942 * @cont: whether another operation will be chained
2943 * @lock: whether to lock SF for PL access only
2944 * @valp: where to store the read data
2946 * Reads up to 4 bytes of data from the serial flash. The location of
2947 * the read needs to be specified prior to calling this by issuing the
2948 * appropriate commands to the serial flash.
2950 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2951 int lock, u32 *valp)
/* The SF_DATA register holds at most 4 bytes; reject anything else. */
2955 if (!byte_cnt || byte_cnt > 4)
/* A flash operation is already in flight -- caller must retry. */
2957 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
/* Kick off the read; BYTECNT is encoded as (count - 1) in hardware. */
2959 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2960 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
/* Poll SF_OP until BUSY clears, then latch the result from SF_DATA. */
2961 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2963 *valp = t4_read_reg(adapter, SF_DATA_A);
2968 * sf1_write - write data to the serial flash
2969 * @adapter: the adapter
2970 * @byte_cnt: number of bytes to write
2971 * @cont: whether another operation will be chained
2972 * @lock: whether to lock SF for PL access only
2973 * @val: value to write
2975 * Writes up to 4 bytes of data to the serial flash. The location of
2976 * the write needs to be specified prior to calling this by issuing the
2977 * appropriate commands to the serial flash.
2979 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* Mirror of sf1_read: at most 4 bytes fit in the SF_DATA register. */
2982 if (!byte_cnt || byte_cnt > 4)
2984 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
/* Stage the data, then start the op; OP_V(1) selects a write cycle. */
2986 t4_write_reg(adapter, SF_DATA_A, val);
2987 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2988 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2989 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2993 * flash_wait_op - wait for a flash operation to complete
2994 * @adapter: the adapter
2995 * @attempts: max number of polls of the status register
2996 * @delay: delay between polls in ms
2998 * Wait for a flash operation to complete by polling the status register.
3000 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Issue RD_STATUS (opcode in SF_RD_STATUS) then read back one status byte. */
3006 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3007 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
/* Budget exhausted without the busy bit clearing -- give up. */
3011 if (--attempts == 0)
3019 * t4_read_flash - read words from serial flash
3020 * @adapter: the adapter
3021 * @addr: the start address for the read
3022 * @nwords: how many 32-bit words to read
3023 * @data: where to store the read data
3024 * @byte_oriented: whether to store data as bytes or as words
3026 * Read the specified number of 32-bit words from the serial flash.
3027 * If @byte_oriented is set the read data is stored as a byte array
3028 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3029 * natural endianness.
3031 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3032 unsigned int nwords, u32 *data, int byte_oriented)
/* Bounds-check against the flash part size; addr must be word-aligned. */
3036 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* Flash expects the 24-bit address big-endian with the opcode in the
 * low byte, hence the swab32 before OR-ing in SF_RD_DATA_FAST.
 */
3039 addr = swab32(addr) | SF_RD_DATA_FAST;
3041 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3042 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
/* Stream out the words; keep CONT set until the final word, which
 * takes the LOCK so the trailing unlock below is well-ordered.
 */
3045 for ( ; nwords; nwords--, data++) {
3046 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3048 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* byte-oriented callers want big-endian bytes in memory */
3052 *data = (__force __u32)(cpu_to_be32(*data));
3058 * t4_write_flash - write up to a page of data to the serial flash
3059 * @adapter: the adapter
3060 * @addr: the start address to write
3061 * @n: length of data to write in bytes
3062 * @data: the data to write
3064 * Writes up to a page of data (256 bytes) to the serial flash starting
3065 * at the given address. All the data must be written to the same page.
3067 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
3068 unsigned int n, const u8 *data)
3072 unsigned int i, c, left, val, offset = addr & 0xff;
/* The write must fit inside one SF_PAGE_SIZE page and inside the part. */
3074 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
/* PROG_PAGE opcode plus byte-swapped 24-bit address, as in t4_read_flash. */
3077 val = swab32(addr) | SF_PROG_PAGE;
3079 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3080 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* Feed the payload up to 4 bytes at a time, big-endian packed into val.
 * NOTE(review): c is assigned on a line not visible here -- presumably
 * min(left, 4); confirm against the surrounding code.
 */
3083 for (left = n; left; left -= c) {
3085 for (val = 0, i = 0; i < c; ++i)
3086 val = (val << 8) + *data++;
3088 ret = sf1_write(adapter, c, c != left, 1, val);
/* Wait for the page-program cycle to finish in the flash part itself. */
3092 ret = flash_wait_op(adapter, 8, 1);
3096 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3098 /* Read the page to verify the write succeeded */
3099 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
/* data was advanced past the payload above, so rewind by n to compare. */
3103 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3104 dev_err(adapter->pdev_dev,
3105 "failed to correctly write the flash page at %#x\n",
3112 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3117 * t4_get_fw_version - read the firmware version
3118 * @adapter: the adapter
3119 * @vers: where to place the version
3121 * Reads the FW version from flash.
3123 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
/* One-word read of fw_hdr.fw_ver straight out of the FW flash region. */
3125 return t4_read_flash(adapter, FLASH_FW_START +
3126 offsetof(struct fw_hdr, fw_ver), 1,
3131 * t4_get_bs_version - read the firmware bootstrap version
3132 * @adapter: the adapter
3133 * @vers: where to place the version
3135 * Reads the FW Bootstrap version from flash.
3137 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
/* Same layout as the main FW header, but in the bootstrap flash region. */
3139 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3140 offsetof(struct fw_hdr, fw_ver), 1,
3145 * t4_get_tp_version - read the TP microcode version
3146 * @adapter: the adapter
3147 * @vers: where to place the version
3149 * Reads the TP microcode version from flash.
3151 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/* The TP microcode version lives inside the FW header as well. */
3153 return t4_read_flash(adapter, FLASH_FW_START +
3154 offsetof(struct fw_hdr, tp_microcode_ver),
3159 * t4_get_exprom_version - return the Expansion ROM version (if any)
3160 * @adap: the adapter
3161 * @vers: where to place the version
3163 * Reads the Expansion ROM header from FLASH and returns the version
3164 * number (if present) through the @vers return value pointer. We return
3165 * this in the Firmware Version Format since it's convenient. Return
3166 * 0 on success, -ENOENT if no Expansion ROM is present.
3168 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3170 struct exprom_header {
3171 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3172 unsigned char hdr_ver[4]; /* Expansion ROM version */
/* Word-sized scratch buffer big enough to hold the header bytes. */
3174 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3178 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3179 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
/* A valid option ROM begins with the 0x55 0xAA signature bytes. */
3184 hdr = (struct exprom_header *)exprom_header_buf;
3185 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
/* Repackage the 4 version bytes into Firmware Version Format. */
3188 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3189 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3190 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3191 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3196 * t4_get_vpd_version - return the VPD version
3197 * @adapter: the adapter
3198 * @vers: where to place the version
3200 * Reads the VPD via the Firmware interface (thus this can only be called
3201 * once we're ready to issue Firmware commands). The format of the
3202 * VPD version is adapter specific. Returns 0 on success, an error on
3205 * Note that early versions of the Firmware didn't include the ability
3206 * to retrieve the VPD version, so we zero-out the return-value parameter
3207 * in that case to avoid leaving it with garbage in it.
3209 * Also note that the Firmware will return its cached copy of the VPD
3210 * Revision ID, not the actual Revision ID as written in the Serial
3211 * EEPROM. This is only an issue if a new VPD has been written and the
3212 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3213 * to defer calling this routine till after a FW_RESET_CMD has been issued
3214 * if the Host Driver will be performing a full adapter initialization.
3216 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
/* Query the DEV.VPDREV firmware parameter; the reply is written to *vers. */
3221 vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3222 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3223 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3224 1, &vpdrev_param, vers);
3231 * t4_get_scfg_version - return the Serial Configuration version
3232 * @adapter: the adapter
3233 * @vers: where to place the version
3235 * Reads the Serial Configuration Version via the Firmware interface
3236 * (thus this can only be called once we're ready to issue Firmware
3237 * commands). The format of the Serial Configuration version is
3238 * adapter specific. Returns 0 on success, an error on failure.
3240 * Note that early versions of the Firmware didn't include the ability
3241 * to retrieve the Serial Configuration version, so we zero-out the
3242 * return-value parameter in that case to avoid leaving it with
3245 * Also note that the Firmware will return its cached copy of the Serial
3246 * Initialization Revision ID, not the actual Revision ID as written in
3247 * the Serial EEPROM. This is only an issue if a new VPD has been written
3248 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3249 * it's best to defer calling this routine till after a FW_RESET_CMD has
3250 * been issued if the Host Driver will be performing a full adapter
3253 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
/* Query the DEV.SCFGREV firmware parameter; the reply lands in *vers. */
3258 scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3259 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3260 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3261 1, &scfgrev_param, vers);
3268 * t4_get_version_info - extract various chip/firmware version information
3269 * @adapter: the adapter
3271 * Reads various chip/firmware version numbers and stores them into the
3272 * adapter Adapter Parameters structure. If any of the efforts fails
3273 * the first failure will be returned, but all of the version numbers
3276 int t4_get_version_info(struct adapter *adapter)
/* FIRST_RET records only the FIRST failure; later getters still run so
 * every version field gets a chance to be populated.
 */
3280 #define FIRST_RET(__getvinfo) \
3282 int __ret = __getvinfo; \
3283 if (__ret && !ret) \
3287 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3288 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3289 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3290 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3291 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3292 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3299 * t4_dump_version_info - dump all of the adapter configuration IDs
3300 * @adapter: the adapter
3302 * Dumps all of the various bits of adapter configuration version/revision
3303 * IDs information. This is typically called at some point after
3304 * t4_get_version_info() has been called.
3306 void t4_dump_version_info(struct adapter *adapter)
3308 /* Device information */
3309 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
3310 adapter->params.vpd.id,
3311 CHELSIO_CHIP_RELEASE(adapter->params.chip));
3312 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
3313 adapter->params.vpd.sn, adapter->params.vpd.pn);
3315 /* Firmware Version */
3316 if (!adapter->params.fw_vers)
3317 dev_warn(adapter->pdev_dev, "No firmware loaded\n")
3319 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
3320 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
3321 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
3322 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
3323 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
3325 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
3326 * Firmware, so dev_info() is more appropriate here.)
3328 if (!adapter->params.bs_vers)
3329 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
3331 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
3332 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
3333 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
3334 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
3335 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
3337 /* TP Microcode Version */
3338 if (!adapter->params.tp_vers)
3339 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
3341 dev_info(adapter->pdev_dev,
3342 "TP Microcode version: %u.%u.%u.%u\n",
3343 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
3344 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
3345 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
3346 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
3348 /* Expansion ROM version */
3349 if (!adapter->params.er_vers)
3350 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
3352 dev_info(adapter->pdev_dev,
3353 "Expansion ROM version: %u.%u.%u.%u\n",
3354 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
3355 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
3356 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
3357 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
3359 /* Serial Configuration version */
3360 dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
3361 adapter->params.scfg_vers);
/* VPD version (firmware-cached; see t4_get_vpd_version) */
3364 dev_info(adapter->pdev_dev, "VPD version: %#x\n",
3365 adapter->params.vpd_vers);
3369 * t4_check_fw_version - check if the FW is supported with this driver
3370 * @adap: the adapter
3372 * Checks if an adapter's FW is compatible with the driver. Returns 0
3373 * if there's exact match, a negative error if the version could not be
3374 * read or there's a major version mismatch
3376 int t4_check_fw_version(struct adapter *adap)
3378 int i, ret, major, minor, micro;
3379 int exp_major, exp_minor, exp_micro;
3380 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3382 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3383 /* Try multiple times before returning error */
3384 for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3385 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
/* Split the packed version word into its major/minor/micro fields. */
3390 major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3391 minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3392 micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
/* Pick the minimum supported FW version for this chip generation. */
3394 switch (chip_version) {
3396 exp_major = T4FW_MIN_VERSION_MAJOR;
3397 exp_minor = T4FW_MIN_VERSION_MINOR;
3398 exp_micro = T4FW_MIN_VERSION_MICRO;
3401 exp_major = T5FW_MIN_VERSION_MAJOR;
3402 exp_minor = T5FW_MIN_VERSION_MINOR;
3403 exp_micro = T5FW_MIN_VERSION_MICRO;
3406 exp_major = T6FW_MIN_VERSION_MAJOR;
3407 exp_minor = T6FW_MIN_VERSION_MINOR;
3408 exp_micro = T6FW_MIN_VERSION_MICRO;
3411 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
/* Lexicographic (major, minor, micro) comparison against the minimum. */
3416 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3417 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3418 dev_err(adap->pdev_dev,
3419 "Card has firmware version %u.%u.%u, minimum "
3420 "supported firmware is %u.%u.%u.\n", major, minor,
3421 micro, exp_major, exp_minor, exp_micro);
3427 /* Is the given firmware API compatible with the one the driver was compiled
3430 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3433 /* short circuit if it's the exact same firmware version */
3434 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
/* Otherwise: same chip and identical interface versions for every ULD. */
3437 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3438 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3439 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3446 /* The firmware in the filesystem is usable, but should it be installed?
3447 * This routine explains itself in detail if it indicates the filesystem
3448 * firmware should be installed.
3450 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
/* Install when the on-card image is unusable or older than the
 * filesystem image; "reason" feeds the explanatory log message below.
 */
3455 if (!card_fw_usable) {
3456 reason = "incompatible or unusable";
3461 reason = "older than the version supported with this driver";
3468 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3469 "installing firmware %u.%u.%u.%u on card.\n",
3470 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3471 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3472 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3473 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3478 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3479 const u8 *fw_data, unsigned int fw_size,
3480 struct fw_hdr *card_fw, enum dev_state state,
3483 int ret, card_fw_usable, fs_fw_usable;
3484 const struct fw_hdr *fs_fw;
3485 const struct fw_hdr *drv_fw;
/* Three candidate images: the header the driver was compiled against
 * (drv_fw), the image currently in flash (card_fw), and the optional
 * image supplied from the filesystem (fs_fw).
 */
3487 drv_fw = &fw_info->fw_hdr;
3489 /* Read the header of the firmware on the card */
3490 ret = t4_read_flash(adap, FLASH_FW_START,
3491 sizeof(*card_fw) / sizeof(uint32_t),
3492 (uint32_t *)card_fw, 1);
3494 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
3496 dev_err(adap->pdev_dev,
3497 "Unable to read card's firmware header: %d\n", ret);
3501 if (fw_data != NULL) {
3502 fs_fw = (const void *)fw_data;
3503 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
3509 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3510 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
3511 /* Common case: the firmware on the card is an exact match and
3512 * the filesystem one is an exact match too, or the filesystem
3513 * one is absent/incompatible.
/* Only flash a new image while the device is still uninitialized. */
3515 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
3516 should_install_fs_fw(adap, card_fw_usable,
3517 be32_to_cpu(fs_fw->fw_ver),
3518 be32_to_cpu(card_fw->fw_ver))) {
3519 ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
3522 dev_err(adap->pdev_dev,
3523 "failed to install firmware: %d\n", ret);
3527 /* Installed successfully, update the cached header too. */
3530 *reset = 0; /* already reset as part of load_fw */
3533 if (!card_fw_usable) {
/* No usable image anywhere -- report all three versions to aid triage. */
3536 d = be32_to_cpu(drv_fw->fw_ver);
3537 c = be32_to_cpu(card_fw->fw_ver);
3538 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
3540 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
3542 "driver compiled with %d.%d.%d.%d, "
3543 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
3545 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
3546 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
3547 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3548 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3549 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3550 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3555 /* We're using whatever's on the card and it's known to be good. */
3556 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
3557 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
3564 * t4_flash_erase_sectors - erase a range of flash sectors
3565 * @adapter: the adapter
3566 * @start: the first sector to erase
3567 * @end: the last sector to erase
3569 * Erases the sectors in the given inclusive range.
3571 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
/* Range check against the number of sectors on this flash part. */
3575 if (end >= adapter->params.sf_nsec)
3578 while (start <= end) {
/* WR_ENABLE must precede each erase; the sector index is shifted into
 * the address bits of the ERASE_SECTOR command.
 */
3579 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3580 (ret = sf1_write(adapter, 4, 0, 1,
3581 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3582 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3583 dev_err(adapter->pdev_dev,
3584 "erase of flash sector %d failed, error %d\n",
3590 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3595 * t4_flash_cfg_addr - return the address of the flash configuration file
3596 * @adapter: the adapter
3598 * Return the address within the flash where the Firmware Configuration
3601 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
/* 1MB parts (FPGA) keep the config file at a different flash offset. */
3603 if (adapter->params.sf_size == 0x100000)
3604 return FLASH_FPGA_CFG_START;
3606 return FLASH_CFG_START;
3609 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
3610 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3611 * and emit an error message for mismatched firmware to save our caller the
3614 static bool t4_fw_matches_chip(const struct adapter *adap,
3615 const struct fw_hdr *hdr)
3617 /* The expression below will return FALSE for any unsupported adapter
3618 * which will keep us "honest" in the future ...
3620 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3621 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3622 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
/* Mismatch: log both chip IDs so the caller doesn't have to. */
3625 dev_err(adap->pdev_dev,
3626 "FW image (%d) is not suitable for this adapter (%d)\n",
3627 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3632 * t4_load_fw - download firmware
3633 * @adap: the adapter
3634 * @fw_data: the firmware image to write
3637 * Write the supplied firmware image to the card's serial flash.
3639 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3644 u8 first_page[SF_PAGE_SIZE];
3645 const __be32 *p = (const __be32 *)fw_data;
3646 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3647 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3648 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3649 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3650 unsigned int fw_start = FLASH_FW_START;
/* Validate the image: non-empty, 512-byte multiple, header-consistent
 * size, fits in the FW flash region, and built for this chip.
 */
3653 dev_err(adap->pdev_dev, "FW image has no data\n");
3657 dev_err(adap->pdev_dev,
3658 "FW image size not multiple of 512 bytes\n");
3661 if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
3662 dev_err(adap->pdev_dev,
3663 "FW image size differs from size in FW header\n");
3666 if (size > fw_size) {
3667 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3671 if (!t4_fw_matches_chip(adap, hdr))
/* The image checksums to 0xffffffff when summed as big-endian words. */
3674 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3675 csum += be32_to_cpu(p[i]);
3677 if (csum != 0xffffffff) {
3678 dev_err(adap->pdev_dev,
3679 "corrupted firmware image, checksum %#x\n", csum);
3683 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3684 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3689 * We write the correct version at the end so the driver can see a bad
3690 * version if the FW write fails. Start by writing a copy of the
3691 * first page with a bad version.
3693 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3694 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3695 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
/* Write the remaining pages, then patch in the real version last. */
3700 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3701 addr += SF_PAGE_SIZE;
3702 fw_data += SF_PAGE_SIZE;
3703 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
3708 ret = t4_write_flash(adap,
3709 fw_start + offsetof(struct fw_hdr, fw_ver),
3710 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3713 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
/* Refresh the cached FW version from what actually landed in flash. */
3716 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3721 * t4_phy_fw_ver - return current PHY firmware version
3722 * @adap: the adapter
3723 * @phy_fw_ver: return value buffer for PHY firmware version
3725 * Returns the current version of external PHY firmware on the
3728 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
/* Query DEV.PHYFW/VERSION, scoped to this adapter's port vector. */
3733 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3734 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3735 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3736 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3737 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3746 * t4_load_phy_fw - download port PHY firmware
3747 * @adap: the adapter
3748 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
3749 * @phy_fw_version: function to check PHY firmware versions
3750 * @phy_fw_data: the PHY firmware image to write
3751 * @phy_fw_size: image size
3753 * Transfer the specified PHY firmware to the adapter. If a non-NULL
3754 * @phy_fw_version is supplied, then it will be used to determine if
3755 * it's necessary to perform the transfer by comparing the version
3756 * of any existing adapter PHY firmware with that of the passed in
3757 * PHY firmware image.
3759 * A negative error number will be returned if an error occurs. If
3760 * version number support is available and there's no need to upgrade
3761 * the firmware, 0 will be returned. If firmware is successfully
3762 * transferred to the adapter, 1 will be returned.
3764 * NOTE: some adapters only have local RAM to store the PHY firmware. As
3765 * a result, a RESET of the adapter would cause that RAM to lose its
3766 * contents. Thus, loading PHY firmware on such adapters must happen
3767 * after any FW_RESET_CMDs ...
3769 int t4_load_phy_fw(struct adapter *adap, int win,
3770 int (*phy_fw_version)(const u8 *, size_t),
3771 const u8 *phy_fw_data, size_t phy_fw_size)
3773 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3774 unsigned long mtype = 0, maddr = 0;
3778 /* If we have version number support, then check to see if the adapter
3779 * already has up-to-date PHY firmware loaded.
3781 if (phy_fw_version) {
3782 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3783 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3787 if (cur_phy_fw_ver >= new_phy_fw_vers) {
3788 CH_WARN(adap, "PHY Firmware already up-to-date, "
3789 "version %#x\n", cur_phy_fw_ver);
3794 /* Ask the firmware where it wants us to copy the PHY firmware image.
3795 * The size of the file requires a special version of the READ command
3796 * which will pass the file size via the values field in PARAMS_CMD and
3797 * retrieve the return value from firmware and place it in the same
3800 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3801 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3802 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3803 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
/* NOTE(review): "¶m" below looks like a mis-encoded "&param," --
 * confirm against the upstream source before building.
 */
3805 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
3806 ¶m, &val, 1, true);
/* Firmware encodes the destination: low byte is the 64KB-granular
 * memory address (shifted to bytes here); mtype comes from val too.
 */
3810 maddr = (val & 0xff) << 16;
3812 /* Copy the supplied PHY Firmware image to the adapter memory location
3813 * allocated by the adapter firmware.
3815 ret = t4_memory_rw(adap, win, mtype, maddr,
3816 phy_fw_size, (__be32 *)phy_fw_data,
3821 /* Tell the firmware that the PHY firmware image has been written to
3822 * RAM and it can now start copying it over to the PHYs. The chip
3823 * firmware will RESET the affected PHYs as part of this operation
3824 * leaving them running the new PHY firmware image.
3826 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3827 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3828 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3829 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
/* NOTE(review): same suspected "&param," garbling as above. 30000 is
 * the mailbox timeout in ms -- PHY flashing is slow.
 */
3830 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
3831 ¶m, &val, 30000);
3833 /* If we have version number support, then check to see that the new
3834 * firmware got loaded properly.
3836 if (phy_fw_version) {
3837 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3841 if (cur_phy_fw_ver != new_phy_fw_vers) {
3842 CH_WARN(adap, "PHY Firmware did not update: "
3843 "version on adapter %#x, "
3844 "version flashed %#x\n",
3845 cur_phy_fw_ver, new_phy_fw_vers);
3854 * t4_fwcache - firmware cache operation
3855 * @adap: the adapter
3856 * @op : the operation (flush or flush and invalidate)
3858 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3860 struct fw_params_cmd c;
/* Build a single-parameter PARAMS write: DEV.FWCACHE = op. */
3862 memset(&c, 0, sizeof(c));
3864 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3865 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3866 FW_PARAMS_CMD_PFN_V(adap->pf) |
3867 FW_PARAMS_CMD_VFN_V(0));
3868 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3870 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3871 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3872 c.param[0].val = cpu_to_be32(op);
/* Fire-and-check over the mailbox; no reply payload is needed. */
3874 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3877 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3878 unsigned int *pif_req_wrptr,
3879 unsigned int *pif_rsp_wrptr)
3882 u32 cfg, val, req, rsp;
/* Temporarily disable LA debug capture while we read the buffers;
 * cfg is restored at the end.
 */
3884 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3885 if (cfg & LADBGEN_F)
3886 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
/* Snapshot the request/response write pointers for the caller. */
3888 val = t4_read_reg(adap, CIM_DEBUGSTS_A);
3889 req = POLADBGWRPTR_G(val);
3890 rsp = PILADBGWRPTR_G(val);
3892 *pif_req_wrptr = req;
3894 *pif_rsp_wrptr = rsp;
/* Walk both LA buffers entry by entry via the debug read pointers. */
3896 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3897 for (j = 0; j < 6; j++) {
3898 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
3899 PILADBGRDPTR_V(rsp));
3900 *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
3901 *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
3905 req = (req + 2) & POLADBGRDPTR_M;
3906 rsp = (rsp + 2) & PILADBGRDPTR_M;
/* Restore the original debug configuration (re-enables capture). */
3908 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3911 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
/* Same pattern as t4_cim_read_pif_la, but for the MA logic analyzer. */
3916 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3917 if (cfg & LADBGEN_F)
3918 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3920 for (i = 0; i < CIM_MALA_SIZE; i++) {
3921 for (j = 0; j < 5; j++) {
3923 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
3924 PILADBGRDPTR_V(idx));
3925 *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
3926 *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
/* Restore the saved debug configuration before returning. */
3929 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3932 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
/* Dump the 8 interleaved ULP-RX LA channels; entries for channel i are
 * stored at la_buf[i], la_buf[i+8], la_buf[i+16], ... (stride 8).
 */
3936 for (i = 0; i < 8; i++) {
3937 u32 *p = la_buf + i;
3939 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
/* Start reading from the current write pointer so entries come out
 * in capture order.
 */
3940 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3941 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3942 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3943 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
3947 /* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
3948 * Capabilities which we control with separate controls -- see, for instance,
3949 * Pause Frames and Forward Error Correction. In order to determine what the
3950 * full set of Advertised Port Capabilities are, the base Advertised Port
3951 * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
3952 * Port Capabilities associated with those other controls. See
3953 * t4_link_acaps() for how this is done.
3955 #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
3959 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3960 * @caps16: a 16-bit Port Capabilities value
3962 * Returns the equivalent 32-bit Port Capabilities value.
3964 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3966 fw_port_cap32_t caps32 = 0;
/* Map each FW_PORT_CAP_* bit to its FW_PORT_CAP32_* counterpart. */
3968 #define CAP16_TO_CAP32(__cap) \
3970 if (caps16 & FW_PORT_CAP_##__cap) \
3971 caps32 |= FW_PORT_CAP32_##__cap; \
3974 CAP16_TO_CAP32(SPEED_100M);
3975 CAP16_TO_CAP32(SPEED_1G);
3976 CAP16_TO_CAP32(SPEED_25G);
3977 CAP16_TO_CAP32(SPEED_10G);
3978 CAP16_TO_CAP32(SPEED_40G);
3979 CAP16_TO_CAP32(SPEED_100G);
3980 CAP16_TO_CAP32(FC_RX);
3981 CAP16_TO_CAP32(FC_TX);
3982 CAP16_TO_CAP32(ANEG);
3983 CAP16_TO_CAP32(FORCE_PAUSE);
3984 CAP16_TO_CAP32(MDIAUTO);
3985 CAP16_TO_CAP32(MDISTRAIGHT);
3986 CAP16_TO_CAP32(FEC_RS);
3987 CAP16_TO_CAP32(FEC_BASER_RS);
3988 CAP16_TO_CAP32(802_3_PAUSE);
3989 CAP16_TO_CAP32(802_3_ASM_DIR);
3991 #undef CAP16_TO_CAP32
3997 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3998 * @caps32: a 32-bit Port Capabilities value
4000 * Returns the equivalent 16-bit Port Capabilities value. Note that
4001 * not all 32-bit Port Capabilities can be represented in the 16-bit
4002 * Port Capabilities and some fields/values may not make it.
4004 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
4006 fw_port_cap16_t caps16 = 0;
/* Inverse of fwcaps16_to_caps32; 32-bit-only capabilities are dropped. */
4008 #define CAP32_TO_CAP16(__cap) \
4010 if (caps32 & FW_PORT_CAP32_##__cap) \
4011 caps16 |= FW_PORT_CAP_##__cap; \
4014 CAP32_TO_CAP16(SPEED_100M);
4015 CAP32_TO_CAP16(SPEED_1G);
4016 CAP32_TO_CAP16(SPEED_10G);
4017 CAP32_TO_CAP16(SPEED_25G);
4018 CAP32_TO_CAP16(SPEED_40G);
4019 CAP32_TO_CAP16(SPEED_100G);
4020 CAP32_TO_CAP16(FC_RX);
4021 CAP32_TO_CAP16(FC_TX);
4022 CAP32_TO_CAP16(802_3_PAUSE);
4023 CAP32_TO_CAP16(802_3_ASM_DIR);
4024 CAP32_TO_CAP16(ANEG);
4025 CAP32_TO_CAP16(FORCE_PAUSE);
4026 CAP32_TO_CAP16(MDIAUTO);
4027 CAP32_TO_CAP16(MDISTRAIGHT);
4028 CAP32_TO_CAP16(FEC_RS);
4029 CAP32_TO_CAP16(FEC_BASER_RS);
4031 #undef CAP32_TO_CAP16
4036 /* Translate Firmware Port Capabilities Pause specification to Common Code */
4037 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
4039 enum cc_pause cc_pause = 0;
/* FC_RX/FC_TX firmware bits map one-to-one onto PAUSE_RX/PAUSE_TX. */
4041 if (fw_pause & FW_PORT_CAP32_FC_RX)
4042 cc_pause |= PAUSE_RX;
4043 if (fw_pause & FW_PORT_CAP32_FC_TX)
4044 cc_pause |= PAUSE_TX;
4049 /* Translate Common Code Pause specification into Firmware Port Capabilities */
4050 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
4052 /* Translate orthogonal RX/TX Pause Controls for L1 Configure
4055 fw_port_cap32_t fw_pause = 0;
4057 if (cc_pause & PAUSE_RX)
4058 fw_pause |= FW_PORT_CAP32_FC_RX;
4059 if (cc_pause & PAUSE_TX)
4060 fw_pause |= FW_PORT_CAP32_FC_TX;
4061 if (!(cc_pause & PAUSE_AUTONEG))
4062 fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
4064 /* Translate orthogonal Pause controls into IEEE 802.3 Pause,
4065 * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
4066 * Note that these bits are ignored in L1 Configure commands.
4068 if (cc_pause & PAUSE_RX) {
4069 if (cc_pause & PAUSE_TX)
4070 fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
4072 fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
4073 FW_PORT_CAP32_802_3_PAUSE;
4074 } else if (cc_pause & PAUSE_TX) {
4075 fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
4081 /* Translate Firmware Forward Error Correction specification to Common Code */
4082 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
4084 enum cc_fec cc_fec = 0;
4086 if (fw_fec & FW_PORT_CAP32_FEC_RS)
4088 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
4089 cc_fec |= FEC_BASER_RS;
4094 /* Translate Common Code Forward Error Correction specification to Firmware */
4095 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
4097 fw_port_cap32_t fw_fec = 0;
4099 if (cc_fec & FEC_RS)
4100 fw_fec |= FW_PORT_CAP32_FEC_RS;
4101 if (cc_fec & FEC_BASER_RS)
4102 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
4108 * t4_link_acaps - compute Link Advertised Port Capabilities
4109 * @adapter: the adapter
4110 * @port: the Port ID
4111 * @lc: the Port's Link Configuration
4113 * Synthesize the Advertised Port Capabilities we'll be using based on
4114 * the base Advertised Port Capabilities (which have been filtered by
4115 * ADVERT_MASK) plus the individual controls for things like Pause
4116 * Frames, Forward Error Correction, MDI, etc.
4118 fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
4119 struct link_config *lc)
4121 fw_port_cap32_t fw_fc, fw_fec, acaps;
4122 unsigned int fw_mdi;
4125 fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
4127 /* Convert driver coding of Pause Frame Flow Control settings into the
4130 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4132 /* Convert Common Code Forward Error Control settings into the
4133 * Firmware's API. If the current Requested FEC has "Automatic"
4134 * (IEEE 802.3) specified, then we use whatever the Firmware
4135 * sent us as part of its IEEE 802.3-based interpretation of
4136 * the Transceiver Module EPROM FEC parameters. Otherwise we
4137 * use whatever is in the current Requested FEC settings.
4139 if (lc->requested_fec & FEC_AUTO)
4140 cc_fec = fwcap_to_cc_fec(lc->def_acaps);
4142 cc_fec = lc->requested_fec;
4143 fw_fec = cc_to_fwcap_fec(cc_fec);
4145 /* Figure out what our Requested Port Capabilities are going to be.
4146 * Note parallel structure in t4_handle_get_port_info() and
4147 * init_link_config().
4149 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4150 acaps = lc->acaps | fw_fc | fw_fec;
4151 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4153 } else if (lc->autoneg == AUTONEG_DISABLE) {
4154 acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
4155 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4158 acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
4161 /* Some Requested Port Capabilities are trivially wrong if they exceed
4162 * the Physical Port Capabilities. We can check that here and provide
4163 * moderately useful feedback in the system log.
4165 * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
4166 * we need to exclude this from this check in order to maintain
4169 if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
4170 dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
4179 * t4_link_l1cfg_core - apply link configuration to MAC/PHY
4180 * @adapter: the adapter
4181 * @mbox: the Firmware Mailbox to use
4182 * @port: the Port ID
4183 * @lc: the Port's Link Configuration
4184 * @sleep_ok: if true we may sleep while awaiting command completion
4185 * @timeout: time to wait for command to finish before timing out
4186 * (negative implies @sleep_ok=false)
4188 * Set up a port's MAC and PHY according to a desired link configuration.
4189 * - If the PHY can auto-negotiate first decide what to advertise, then
4190 * enable/disable auto-negotiation as desired, and reset.
4191 * - If the PHY does not auto-negotiate just reset it.
4192 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4193 * otherwise do it later based on the outcome of auto-negotiation.
4195 int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
4196 unsigned int port, struct link_config *lc,
4197 u8 sleep_ok, int timeout)
4199 unsigned int fw_caps = adapter->params.fw_caps_support;
4200 struct fw_port_cmd cmd;
4201 fw_port_cap32_t rcap;
4204 if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
4205 lc->autoneg == AUTONEG_ENABLE) {
4209 /* Compute our Requested Port Capabilities and send that on to the
4212 rcap = t4_link_acaps(adapter, port, lc);
4213 memset(&cmd, 0, sizeof(cmd));
4214 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4215 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4216 FW_PORT_CMD_PORTID_V(port));
4217 cmd.action_to_len16 =
4218 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4219 ? FW_PORT_ACTION_L1_CFG
4220 : FW_PORT_ACTION_L1_CFG32) |
4222 if (fw_caps == FW_CAPS16)
4223 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
4225 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
4227 ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
4230 /* Unfortunately, even if the Requested Port Capabilities "fit" within
4231 * the Physical Port Capabilities, some combinations of features may
4232 * still not be legal. For example, 40Gb/s and Reed-Solomon Forward
4233 * Error Correction. So if the Firmware rejects the L1 Configure
4234 * request, flag that here.
4237 dev_err(adapter->pdev_dev,
4238 "Requested Port Capabilities %#x rejected, error %d\n",
4246 * t4_restart_aneg - restart autonegotiation
4247 * @adap: the adapter
4248 * @mbox: mbox to use for the FW command
4249 * @port: the port id
4251 * Restarts autonegotiation for the selected port.
4253 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4255 unsigned int fw_caps = adap->params.fw_caps_support;
4256 struct fw_port_cmd c;
4258 memset(&c, 0, sizeof(c));
4259 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4260 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4261 FW_PORT_CMD_PORTID_V(port));
4263 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4264 ? FW_PORT_ACTION_L1_CFG
4265 : FW_PORT_ACTION_L1_CFG32) |
4267 if (fw_caps == FW_CAPS16)
4268 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4270 c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
4271 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry in a table-driven interrupt-cause description; a table is
 * terminated by an entry with mask == 0 (see t4_handle_intr_status()).
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
4285 * t4_handle_intr_status - table driven interrupt handler
4286 * @adapter: the adapter that generated the interrupt
4287 * @reg: the interrupt status register to process
4288 * @acts: table of interrupt actions
4290 * A table driven interrupt handler that applies a set of masks to an
4291 * interrupt status word and performs the corresponding actions if the
4292 * interrupts described by the mask have occurred. The actions include
4293 * optionally emitting a warning or alert message. The table is terminated
4294 * by an entry specifying mask 0. Returns the number of fatal interrupt
4297 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4298 const struct intr_info *acts)
4301 unsigned int mask = 0;
4302 unsigned int status = t4_read_reg(adapter, reg);
4304 for ( ; acts->mask; ++acts) {
4305 if (!(status & acts->mask))
4309 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4310 status & acts->mask);
4311 } else if (acts->msg && printk_ratelimit())
4312 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4313 status & acts->mask);
4314 if (acts->int_handler)
4315 acts->int_handler(adapter);
4319 if (status) /* clear processed interrupts */
4320 t4_write_reg(adapter, reg, status);
4325 * Interrupt handler for the PCIE module.
4327 static void pcie_intr_handler(struct adapter *adapter)
4329 static const struct intr_info sysbus_intr_info[] = {
4330 { RNPP_F, "RXNP array parity error", -1, 1 },
4331 { RPCP_F, "RXPC array parity error", -1, 1 },
4332 { RCIP_F, "RXCIF array parity error", -1, 1 },
4333 { RCCP_F, "Rx completions control array parity error", -1, 1 },
4334 { RFTP_F, "RXFT array parity error", -1, 1 },
4337 static const struct intr_info pcie_port_intr_info[] = {
4338 { TPCP_F, "TXPC array parity error", -1, 1 },
4339 { TNPP_F, "TXNP array parity error", -1, 1 },
4340 { TFTP_F, "TXFT array parity error", -1, 1 },
4341 { TCAP_F, "TXCA array parity error", -1, 1 },
4342 { TCIP_F, "TXCIF array parity error", -1, 1 },
4343 { RCAP_F, "RXCA array parity error", -1, 1 },
4344 { OTDD_F, "outbound request TLP discarded", -1, 1 },
4345 { RDPE_F, "Rx data parity error", -1, 1 },
4346 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
4349 static const struct intr_info pcie_intr_info[] = {
4350 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4351 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4352 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4353 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4354 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4355 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4356 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4357 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4358 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4359 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4360 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4361 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4362 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4363 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4364 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4365 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4366 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4367 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4368 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4369 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4370 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4371 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4372 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4373 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4374 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4375 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4376 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4377 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
4378 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
4379 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
4384 static struct intr_info t5_pcie_intr_info[] = {
4385 { MSTGRPPERR_F, "Master Response Read Queue parity error",
4387 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4388 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4389 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4390 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4391 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4392 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4393 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
4395 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
4397 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4398 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4399 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4400 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4401 { DREQWRPERR_F, "PCI DMA channel write request parity error",
4403 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4404 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4405 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4406 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4407 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4408 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4409 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4410 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4411 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4412 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4413 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
4415 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4417 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4418 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4419 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4420 { READRSPERR_F, "Outbound read error", -1, 0 },
4426 if (is_t4(adapter->params.chip))
4427 fat = t4_handle_intr_status(adapter,
4428 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4430 t4_handle_intr_status(adapter,
4431 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4432 pcie_port_intr_info) +
4433 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4436 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4440 t4_fatal_err(adapter);
4444 * TP interrupt handler.
4446 static void tp_intr_handler(struct adapter *adapter)
4448 static const struct intr_info tp_intr_info[] = {
4449 { 0x3fffffff, "TP parity error", -1, 1 },
4450 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
4454 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
4455 t4_fatal_err(adapter);
4459 * SGE interrupt handler.
4461 static void sge_intr_handler(struct adapter *adapter)
4466 static const struct intr_info sge_intr_info[] = {
4467 { ERR_CPL_EXCEED_IQE_SIZE_F,
4468 "SGE received CPL exceeding IQE size", -1, 1 },
4469 { ERR_INVALID_CIDX_INC_F,
4470 "SGE GTS CIDX increment too large", -1, 0 },
4471 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4472 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
4473 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
4474 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4475 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
4477 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
4479 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
4481 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
4483 { ERR_ING_CTXT_PRIO_F,
4484 "SGE too many priority ingress contexts", -1, 0 },
4485 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4486 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
4490 static struct intr_info t4t5_sge_intr_info[] = {
4491 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4492 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4493 { ERR_EGR_CTXT_PRIO_F,
4494 "SGE too many priority egress contexts", -1, 0 },
4498 perr = t4_read_reg(adapter, SGE_INT_CAUSE1_A);
4501 dev_alert(adapter->pdev_dev, "SGE Cause1 Parity Error %#x\n",
4505 perr = t4_read_reg(adapter, SGE_INT_CAUSE2_A);
4508 dev_alert(adapter->pdev_dev, "SGE Cause2 Parity Error %#x\n",
4512 if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
4513 perr = t4_read_reg(adapter, SGE_INT_CAUSE5_A);
4514 /* Parity error (CRC) for err_T_RxCRC is trivial, ignore it */
4515 perr &= ~ERR_T_RXCRC_F;
4518 dev_alert(adapter->pdev_dev,
4519 "SGE Cause5 Parity Error %#x\n", perr);
4523 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4524 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4525 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4526 t4t5_sge_intr_info);
4528 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4529 if (err & ERROR_QID_VALID_F) {
4530 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4532 if (err & UNCAPTURED_ERROR_F)
4533 dev_err(adapter->pdev_dev,
4534 "SGE UNCAPTURED_ERROR set (clearing)\n");
4535 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4536 UNCAPTURED_ERROR_F);
4540 t4_fatal_err(adapter);
4543 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
4544 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
4545 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
4546 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4549 * CIM interrupt handler.
4551 static void cim_intr_handler(struct adapter *adapter)
4553 static const struct intr_info cim_intr_info[] = {
4554 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
4555 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4556 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4557 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
4558 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
4559 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
4560 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
4561 { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
4564 static const struct intr_info cim_upintr_info[] = {
4565 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
4566 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
4567 { ILLWRINT_F, "CIM illegal write", -1, 1 },
4568 { ILLRDINT_F, "CIM illegal read", -1, 1 },
4569 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
4570 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
4571 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
4572 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
4573 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
4574 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
4575 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
4576 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
4577 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
4578 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
4579 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
4580 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
4581 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
4582 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
4583 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
4584 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
4585 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
4586 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
4587 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
4588 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
4589 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
4590 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
4591 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
4592 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
4599 fw_err = t4_read_reg(adapter, PCIE_FW_A);
4600 if (fw_err & PCIE_FW_ERR_F)
4601 t4_report_fw_error(adapter);
4603 /* When the Firmware detects an internal error which normally
4604 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
4605 * in order to make sure the Host sees the Firmware Crash. So
4606 * if we have a Timer0 interrupt and don't see a Firmware Crash,
4607 * ignore the Timer0 interrupt.
4610 val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
4611 if (val & TIMER0INT_F)
4612 if (!(fw_err & PCIE_FW_ERR_F) ||
4613 (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
4614 t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
4617 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
4619 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
4622 t4_fatal_err(adapter);
4626 * ULP RX interrupt handler.
4628 static void ulprx_intr_handler(struct adapter *adapter)
4630 static const struct intr_info ulprx_intr_info[] = {
4631 { 0x1800000, "ULPRX context error", -1, 1 },
4632 { 0x7fffff, "ULPRX parity error", -1, 1 },
4636 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4637 t4_fatal_err(adapter);
4641 * ULP TX interrupt handler.
4643 static void ulptx_intr_handler(struct adapter *adapter)
4645 static const struct intr_info ulptx_intr_info[] = {
4646 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4648 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4650 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4652 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4654 { 0xfffffff, "ULPTX parity error", -1, 1 },
4658 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4659 t4_fatal_err(adapter);
4663 * PM TX interrupt handler.
4665 static void pmtx_intr_handler(struct adapter *adapter)
4667 static const struct intr_info pmtx_intr_info[] = {
4668 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4669 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4670 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4671 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4672 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4673 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4674 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4676 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4677 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4681 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4682 t4_fatal_err(adapter);
4686 * PM RX interrupt handler.
4688 static void pmrx_intr_handler(struct adapter *adapter)
4690 static const struct intr_info pmrx_intr_info[] = {
4691 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4692 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4693 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4694 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4696 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4697 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4701 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4702 t4_fatal_err(adapter);
4706 * CPL switch interrupt handler.
4708 static void cplsw_intr_handler(struct adapter *adapter)
4710 static const struct intr_info cplsw_intr_info[] = {
4711 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4712 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4713 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4714 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4715 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4716 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4720 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4721 t4_fatal_err(adapter);
4725 * LE interrupt handler.
4727 static void le_intr_handler(struct adapter *adap)
4729 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4730 static const struct intr_info le_intr_info[] = {
4731 { LIPMISS_F, "LE LIP miss", -1, 0 },
4732 { LIP0_F, "LE 0 LIP error", -1, 0 },
4733 { PARITYERR_F, "LE parity error", -1, 1 },
4734 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4735 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
4739 static struct intr_info t6_le_intr_info[] = {
4740 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4741 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4742 { CMDTIDERR_F, "LE cmd tid error", -1, 1 },
4743 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4744 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4745 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4746 { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
4750 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4751 (chip <= CHELSIO_T5) ?
4752 le_intr_info : t6_le_intr_info))
4757 * MPS interrupt handler.
4759 static void mps_intr_handler(struct adapter *adapter)
4761 static const struct intr_info mps_rx_intr_info[] = {
4762 { 0xffffff, "MPS Rx parity error", -1, 1 },
4765 static const struct intr_info mps_tx_intr_info[] = {
4766 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4767 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4768 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4770 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4772 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4773 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4774 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4777 static const struct intr_info t6_mps_tx_intr_info[] = {
4778 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4779 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4780 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4782 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4784 /* MPS Tx Bubble is normal for T6 */
4785 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4786 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4789 static const struct intr_info mps_trc_intr_info[] = {
4790 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4791 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4793 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4796 static const struct intr_info mps_stat_sram_intr_info[] = {
4797 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4800 static const struct intr_info mps_stat_tx_intr_info[] = {
4801 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4804 static const struct intr_info mps_stat_rx_intr_info[] = {
4805 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4808 static const struct intr_info mps_cls_intr_info[] = {
4809 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4810 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4811 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
4817 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
4819 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
4820 is_t6(adapter->params.chip)
4821 ? t6_mps_tx_intr_info
4822 : mps_tx_intr_info) +
4823 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
4824 mps_trc_intr_info) +
4825 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
4826 mps_stat_sram_intr_info) +
4827 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
4828 mps_stat_tx_intr_info) +
4829 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
4830 mps_stat_rx_intr_info) +
4831 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
4834 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4835 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
4837 t4_fatal_err(adapter);
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)
4844 * EDC/MC interrupt handler.
4846 static void mem_intr_handler(struct adapter *adapter, int idx)
4848 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4850 unsigned int addr, cnt_addr, v;
4852 if (idx <= MEM_EDC1) {
4853 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4854 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4855 } else if (idx == MEM_MC) {
4856 if (is_t4(adapter->params.chip)) {
4857 addr = MC_INT_CAUSE_A;
4858 cnt_addr = MC_ECC_STATUS_A;
4860 addr = MC_P_INT_CAUSE_A;
4861 cnt_addr = MC_P_ECC_STATUS_A;
4864 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4865 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4868 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4869 if (v & PERR_INT_CAUSE_F)
4870 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4872 if (v & ECC_CE_INT_CAUSE_F) {
4873 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
4875 t4_edc_err_read(adapter, idx);
4877 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4878 if (printk_ratelimit())
4879 dev_warn(adapter->pdev_dev,
4880 "%u %s correctable ECC data error%s\n",
4881 cnt, name[idx], cnt > 1 ? "s" : "");
4883 if (v & ECC_UE_INT_CAUSE_F)
4884 dev_alert(adapter->pdev_dev,
4885 "%s uncorrectable ECC data error\n", name[idx]);
4887 t4_write_reg(adapter, addr, v);
4888 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4889 t4_fatal_err(adapter);
4893 * MA interrupt handler.
4895 static void ma_intr_handler(struct adapter *adap)
4897 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4899 if (status & MEM_PERR_INT_CAUSE_F) {
4900 dev_alert(adap->pdev_dev,
4901 "MA parity error, parity status %#x\n",
4902 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
4903 if (is_t5(adap->params.chip))
4904 dev_alert(adap->pdev_dev,
4905 "MA parity error, parity status %#x\n",
4907 MA_PARITY_ERROR_STATUS2_A));
4909 if (status & MEM_WRAP_INT_CAUSE_F) {
4910 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4911 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4912 "client %u to address %#x\n",
4913 MEM_WRAP_CLIENT_NUM_G(v),
4914 MEM_WRAP_ADDRESS_G(v) << 4);
4916 t4_write_reg(adap, MA_INT_CAUSE_A, status);
4921 * SMB interrupt handler.
4923 static void smb_intr_handler(struct adapter *adap)
4925 static const struct intr_info smb_intr_info[] = {
4926 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4927 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4928 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4932 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4937 * NC-SI interrupt handler.
4939 static void ncsi_intr_handler(struct adapter *adap)
4941 static const struct intr_info ncsi_intr_info[] = {
4942 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4943 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4944 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4945 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4949 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4954 * XGMAC interrupt handler.
4956 static void xgmac_intr_handler(struct adapter *adap, int port)
4958 u32 v, int_cause_reg;
4960 if (is_t4(adap->params.chip))
4961 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4963 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4965 v = t4_read_reg(adap, int_cause_reg);
4967 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4971 if (v & TXFIFO_PRTY_ERR_F)
4972 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4974 if (v & RXFIFO_PRTY_ERR_F)
4975 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4977 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4982 * PL interrupt handler.
4984 static void pl_intr_handler(struct adapter *adap)
4986 static const struct intr_info pl_intr_info[] = {
4987 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4988 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4992 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
4996 #define PF_INTR_MASK (PFSW_F)
4997 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
4998 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
4999 CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
5002 * t4_slow_intr_handler - control path interrupt handler
5003 * @adapter: the adapter
5005 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
5006 * The designation 'slow' is because it involves register reads, while
5007 * data interrupts typically don't involve any MMIOs.
5009 int t4_slow_intr_handler(struct adapter *adapter)
5011 /* There are rare cases where a PL_INT_CAUSE bit may end up getting
5012 * set when the corresponding PL_INT_ENABLE bit isn't set. It's
5013 * easiest just to mask that case here.
5015 u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
5016 u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A);
5017 u32 cause = raw_cause & enable;
/* Not our interrupt: none of the global causes we master are set. */
5019 if (!(cause & GLBL_INTR_MASK))
/* Dispatch to the per-module handlers.  NOTE(review): the `if (cause &
 * <MODULE>_F)` guard preceding each of the next several handler calls is
 * elided from this view — each call is presumably conditional on its
 * module's cause bit; confirm against the full file.
 */
5022 cim_intr_handler(adapter);
5024 mps_intr_handler(adapter);
5026 ncsi_intr_handler(adapter);
5028 pl_intr_handler(adapter);
5030 smb_intr_handler(adapter);
5031 if (cause & XGMAC0_F)
5032 xgmac_intr_handler(adapter, 0);
5033 if (cause & XGMAC1_F)
5034 xgmac_intr_handler(adapter, 1);
5035 if (cause & XGMAC_KR0_F)
5036 xgmac_intr_handler(adapter, 2);
5037 if (cause & XGMAC_KR1_F)
5038 xgmac_intr_handler(adapter, 3);
5040 pcie_intr_handler(adapter);
5042 mem_intr_handler(adapter, MEM_MC);
/* MC1 only exists on T5 parts, hence the extra chip check. */
5043 if (is_t5(adapter->params.chip) && (cause & MC1_F))
5044 mem_intr_handler(adapter, MEM_MC1);
5046 mem_intr_handler(adapter, MEM_EDC0);
5048 mem_intr_handler(adapter, MEM_EDC1);
5050 le_intr_handler(adapter);
5052 tp_intr_handler(adapter);
5054 ma_intr_handler(adapter);
5055 if (cause & PM_TX_F)
5056 pmtx_intr_handler(adapter);
5057 if (cause & PM_RX_F)
5058 pmrx_intr_handler(adapter);
5059 if (cause & ULP_RX_F)
5060 ulprx_intr_handler(adapter);
5061 if (cause & CPL_SWITCH_F)
5062 cplsw_intr_handler(adapter);
5064 sge_intr_handler(adapter);
5065 if (cause & ULP_TX_F)
5066 ulptx_intr_handler(adapter);
5068 /* Clear the interrupts just processed for which we are the master. */
5069 t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK);
/* The read-back forces the posted write above out to the device. */
5070 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
5075 * t4_intr_enable - enable interrupts
5076 * @adapter: the adapter whose interrupts should be enabled
5078 * Enable PF-specific interrupts for the calling function and the top-level
5079 * interrupt concentrator for global interrupts. Interrupts are already
5080 * enabled at each module, here we just enable the roots of the interrupt
5083 * Note: this function should be called only when the driver manages
5084 * non PF-specific interrupts from the various HW modules. Only one PCI
5085 * function at a time should be doing this.
5087 void t4_intr_enable(struct adapter *adapter)
/* NOTE(review): the declaration of `val` (presumably `u32 val = 0;`) is
 * elided from this view — confirm against the full file.
 */
/* The PF-number field of PL_WHOAMI moved on T6, hence the two decoders. */
5090 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5091 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5092 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
/* Some SGE error interrupts only exist on T4/T5 parts. */
5094 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5095 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
5096 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
5097 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
5098 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
5099 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
5100 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
5101 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
5102 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
/* Enable the per-PF root and route global causes to this PF. */
5103 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
5104 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
5108 * t4_intr_disable - disable interrupts
5109 * @adapter: the adapter whose interrupts should be disabled
5111 * Disable interrupts. We only disable the top-level interrupt
5112 * concentrators. The caller must be a PCI function managing global
5115 void t4_intr_disable(struct adapter *adapter)
5119 if (pci_channel_offline(adapter->pdev))
5122 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5123 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5124 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5126 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
5127 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
5130 unsigned int t4_chip_rss_size(struct adapter *adap)
5132 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5133 return RSS_NENTRIES;
5135 return T6_RSS_NENTRIES;
5139 * t4_config_rss_range - configure a portion of the RSS mapping table
5140 * @adapter: the adapter
5141 * @mbox: mbox to use for the FW command
5142 * @viid: virtual interface whose RSS subtable is to be written
5143 * @start: start entry in the table to write
5144 * @n: how many table entries to write
5145 * @rspq: values for the response queue lookup table
5146 * @nrspq: number of values in @rspq
5148 * Programs the selected part of the VI's RSS mapping table with the
5149 * provided values. If @nrspq < @n the supplied values are used repeatedly
5150 * until the full table range is populated.
5152 * The caller must ensure the values in @rspq are in the range allowed for
5155 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5156 int start, int n, const u16 *rspq, unsigned int nrspq)
/* rsp walks @rspq circularly; rsp_end marks where to wrap back to rspq. */
5159 const u16 *rsp = rspq;
5160 const u16 *rsp_end = rspq + nrspq;
5161 struct fw_rss_ind_tbl_cmd cmd;
5163 memset(&cmd, 0, sizeof(cmd));
5164 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
5165 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5166 FW_RSS_IND_TBL_CMD_VIID_V(viid));
5167 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5169 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
/* NOTE(review): the `while (n > 0)` loop header, the per-triple loop, the
 * wrap-back of rsp to rspq after each exhaustion, and the start/n bookkeeping
 * are elided from this view — confirm against the full file.
 */
5171 int nq = min(n, 32);
5172 __be32 *qp = &cmd.iq0_to_iq2;
5174 cmd.niqid = cpu_to_be16(nq);
5175 cmd.startidx = cpu_to_be16(start);
/* Three 10-bit queue IDs are packed into each big-endian 32-bit word. */
5183 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
5184 if (++rsp >= rsp_end)
5186 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
5187 if (++rsp >= rsp_end)
5189 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
5190 if (++rsp >= rsp_end)
5193 *qp++ = cpu_to_be32(v);
5197 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5205 * t4_config_glbl_rss - configure the global RSS mode
5206 * @adapter: the adapter
5207 * @mbox: mbox to use for the FW command
5208 * @mode: global RSS mode
5209 * @flags: mode-specific flags
5211 * Sets the global RSS mode.
5213 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5216 struct fw_rss_glb_config_cmd c;
5218 memset(&c, 0, sizeof(c));
5219 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5220 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5221 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Only the MANUAL and BASICVIRTUAL modes are supported; the command union
 * member that carries the mode (and, for BASICVIRTUAL, the flags) differs.
 */
5222 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5223 c.u.manual.mode_pkd =
5224 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5225 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5226 c.u.basicvirtual.mode_pkd =
5227 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5228 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
/* NOTE(review): the rejection of other modes (presumably `return -EINVAL;`)
 * is elided from this view — confirm against the full file.
 */
5231 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5235 * t4_config_vi_rss - configure per VI RSS settings
5236 * @adapter: the adapter
5237 * @mbox: mbox to use for the FW command
5240 * @defq: id of the default RSS queue for the VI.
5242 * Configures VI-specific RSS properties.
5244 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5245 unsigned int flags, unsigned int defq)
5247 struct fw_rss_vi_config_cmd c;
5249 memset(&c, 0, sizeof(c));
5250 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5251 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5252 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5253 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5254 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5255 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5256 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5259 /* Read an RSS table row */
5260 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5262 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
5263 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5268 * t4_read_rss - read the contents of the RSS mapping table
5269 * @adapter: the adapter
5270 * @map: holds the contents of the RSS mapping table
5272 * Reads the contents of the RSS hash->queue mapping table.
5274 int t4_read_rss(struct adapter *adapter, u16 *map)
5276 int i, ret, nentries;
5279 nentries = t4_chip_rss_size(adapter);
5280 for (i = 0; i < nentries / 2; ++i) {
5281 ret = rd_rss_row(adapter, i, &val);
5284 *map++ = LKPTBLQUEUE0_G(val);
5285 *map++ = LKPTBLQUEUE1_G(val);
5290 static unsigned int t4_use_ldst(struct adapter *adap)
5292 return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
5296 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5297 * @adap: the adapter
5298 * @cmd: TP fw ldst address space type
5299 * @vals: where the indirect register values are stored/written
5300 * @nregs: how many indirect registers to read/write
5301 * @start_index: index of first indirect register to read/write
5302 * @rw: Read (1) or Write (0)
5303 * @sleep_ok: if true we may sleep while awaiting command completion
5305 * Access TP indirect registers through LDST
5307 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5308 unsigned int nregs, unsigned int start_index,
5309 unsigned int rw, bool sleep_ok)
5313 struct fw_ldst_cmd c;
/* One mailbox command per register; each names its address space and slot. */
5315 for (i = 0; i < nregs; i++) {
5316 memset(&c, 0, sizeof(c));
5317 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5319 (rw ? FW_CMD_READ_F :
5321 FW_LDST_CMD_ADDRSPACE_V(cmd));
5322 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5324 c.u.addrval.addr = cpu_to_be32(start_index + i);
/* Writes carry the value out; reads leave it zero for the FW to fill in. */
5325 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5326 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
/* NOTE(review): the error check on `ret` and the early exit are elided
 * from this view — confirm against the full file.
 */
5332 vals[i] = be32_to_cpu(c.u.addrval.val);
5338 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5339 * @adap: the adapter
5340 * @reg_addr: Address Register
5341 * @reg_data: Data register
5342 * @buff: where the indirect register values are stored/written
5343 * @nregs: how many indirect registers to read/write
5344 * @start_index: index of first indirect register to read/write
5345 * @rw: READ(1) or WRITE(0)
5346 * @sleep_ok: if true we may sleep while awaiting command completion
5348 * Read/Write TP indirect registers through LDST if possible.
5349 * Else, use backdoor access
5351 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5352 u32 *buff, u32 nregs, u32 start_index, int rw,
/* Map the address register to the matching FW LDST address space.
 * NOTE(review): the `switch (reg_addr)` header, the TP_PIO_ADDR_A case
 * label, the `break;` statements and the `default:` label are elided from
 * this view — confirm against the full file.
 */
5360 cmd = FW_LDST_ADDRSPC_TP_PIO;
5362 case TP_TM_PIO_ADDR_A:
5363 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5365 case TP_MIB_INDEX_A:
5366 cmd = FW_LDST_ADDRSPC_TP_MIB;
/* Unknown address register: skip LDST entirely. */
5369 goto indirect_access;
5372 if (t4_use_ldst(adap))
5373 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
/* Backdoor path: drive the address/data register pair directly. */
5380 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5383 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5389 * t4_tp_pio_read - Read TP PIO registers
5390 * @adap: the adapter
5391 * @buff: where the indirect register values are written
5392 * @nregs: how many indirect registers to read
5393 * @start_index: index of first indirect register to read
5394 * @sleep_ok: if true we may sleep while awaiting command completion
5396 * Read TP PIO Registers
5398 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5399 u32 start_index, bool sleep_ok)
5401 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5402 start_index, 1, sleep_ok);
5406 * t4_tp_pio_write - Write TP PIO registers
5407 * @adap: the adapter
5408 * @buff: where the indirect register values are stored
5409 * @nregs: how many indirect registers to write
5410 * @start_index: index of first indirect register to write
5411 * @sleep_ok: if true we may sleep while awaiting command completion
5413 * Write TP PIO Registers
5415 static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5416 u32 start_index, bool sleep_ok)
5418 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5419 start_index, 0, sleep_ok);
5423 * t4_tp_tm_pio_read - Read TP TM PIO registers
5424 * @adap: the adapter
5425 * @buff: where the indirect register values are written
5426 * @nregs: how many indirect registers to read
5427 * @start_index: index of first indirect register to read
5428 * @sleep_ok: if true we may sleep while awaiting command completion
5430 * Read TP TM PIO Registers
5432 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5433 u32 start_index, bool sleep_ok)
5435 t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5436 nregs, start_index, 1, sleep_ok);
5440 * t4_tp_mib_read - Read TP MIB registers
5441 * @adap: the adapter
5442 * @buff: where the indirect register values are written
5443 * @nregs: how many indirect registers to read
5444 * @start_index: index of first indirect register to read
5445 * @sleep_ok: if true we may sleep while awaiting command completion
5447 * Read TP MIB Registers
5449 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5452 t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5453 start_index, 1, sleep_ok);
5457 * t4_read_rss_key - read the global RSS key
5458 * @adap: the adapter
5459 * @key: 10-entry array holding the 320-bit RSS key
5460 * @sleep_ok: if true we may sleep while awaiting command completion
5462 * Reads the global 320-bit RSS key.
5464 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5466 t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5470 * t4_write_rss_key - program one of the RSS keys
5471 * @adap: the adapter
5472 * @key: 10-entry array holding the 320-bit RSS key
5473 * @idx: which RSS key to write
5474 * @sleep_ok: if true we may sleep while awaiting command completion
5476 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5477 * 0..15 the corresponding entry in the RSS key table is written,
5478 * otherwise the global RSS key is written.
5480 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5483 u8 rss_key_addr_cnt = 16;
5484 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5486 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5487 * allows access to key addresses 16-63 by using KeyWrAddrX
5488 * as index[5:4](upper 2) into key table
5490 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5491 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5492 rss_key_addr_cnt = 32;
/* Stage the key into the secret-key PIO registers first ... */
5494 t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
/* ... then commit it to the selected key-table slot (extended T6 addressing
 * when more than 16 slots are available).  NOTE(review): the `else` that
 * should precede the second t4_write_reg() is elided from this view —
 * confirm against the full file.
 */
5496 if (idx >= 0 && idx < rss_key_addr_cnt) {
5497 if (rss_key_addr_cnt > 16)
5498 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5499 KEYWRADDRX_V(idx >> 4) |
5500 T6_VFWRADDR_V(idx) | KEYWREN_F);
5502 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5503 KEYWRADDR_V(idx) | KEYWREN_F);
5508 * t4_read_rss_pf_config - read PF RSS Configuration Table
5509 * @adapter: the adapter
5510 * @index: the entry in the PF RSS table to read
5511 * @valp: where to store the returned value
5512 * @sleep_ok: if true we may sleep while awaiting command completion
5514 * Reads the PF RSS Configuration Table at the specified index and returns
5515 * the value found there.
5517 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5518 u32 *valp, bool sleep_ok)
5520 t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
5524 * t4_read_rss_vf_config - read VF RSS Configuration Table
5525 * @adapter: the adapter
5526 * @index: the entry in the VF RSS table to read
5527 * @vfl: where to store the returned VFL
5528 * @vfh: where to store the returned VFH
5529 * @sleep_ok: if true we may sleep while awaiting command completion
5531 * Reads the VF RSS Configuration Table at the specified index and returns
5532 * the (VFL, VFH) values found there.
5534 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5535 u32 *vfl, u32 *vfh, bool sleep_ok)
5537 u32 vrt, mask, data;
/* The VF write-address field widened on T6, hence the two encodings.
 * NOTE(review): the `if` header and `else` keyword around these branches
 * are elided from this view — confirm against the full file.
 */
5539 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5540 mask = VFWRADDR_V(VFWRADDR_M);
5541 data = VFWRADDR_V(index);
5543 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
5544 data = T6_VFWRADDR_V(index);
5547 /* Request that the index'th VF Table values be read into VFL/VFH.
5549 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5550 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5551 vrt |= data | VFRDEN_F;
5552 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5554 /* Grab the VFL/VFH values ...
5556 t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5557 t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5561 * t4_read_rss_pf_map - read PF RSS Map
5562 * @adapter: the adapter
5563 * @sleep_ok: if true we may sleep while awaiting command completion
5565 * Reads the PF RSS Map register and returns its value.
5567 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5571 t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5576 * t4_read_rss_pf_mask - read PF RSS Mask
5577 * @adapter: the adapter
5578 * @sleep_ok: if true we may sleep while awaiting command completion
5580 * Reads the PF RSS Mask register and returns its value.
5582 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5586 t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5591 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5592 * @adap: the adapter
5593 * @v4: holds the TCP/IP counter values
5594 * @v6: holds the TCP/IPv6 counter values
5595 * @sleep_ok: if true we may sleep while awaiting command completion
5597 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5598 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5600 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5601 struct tp_tcp_stats *v6, bool sleep_ok)
/* Scratch buffer sized to span the contiguous TCP MIB register block. */
5603 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
/* Helpers that index val[] by MIB register name and reassemble the
 * 64-bit counters from their HI/LO halves.
 */
5605 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
5606 #define STAT(x) val[STAT_IDX(x)]
5607 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
/* NOTE(review): the `if (v4) {` and `if (v6) {` guards and the trailing
 * #undef of the three helper macros are elided from this view — confirm
 * against the full file.
 */
5610 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5611 TP_MIB_TCP_OUT_RST_A, sleep_ok);
5612 v4->tcp_out_rsts = STAT(OUT_RST);
5613 v4->tcp_in_segs = STAT64(IN_SEG);
5614 v4->tcp_out_segs = STAT64(OUT_SEG);
5615 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5618 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5619 TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
5620 v6->tcp_out_rsts = STAT(OUT_RST);
5621 v6->tcp_in_segs = STAT64(IN_SEG);
5622 v6->tcp_out_segs = STAT64(OUT_SEG);
5623 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5631 * t4_tp_get_err_stats - read TP's error MIB counters
5632 * @adap: the adapter
5633 * @st: holds the counter values
5634 * @sleep_ok: if true we may sleep while awaiting command completion
5636 * Returns the values of TP's error counters.
5638 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
/* One MIB read per per-channel counter array; nchan comes from the chip's
 * architecture parameters.
 */
5641 int nchan = adap->params.arch.nchan;
5643 t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5645 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5647 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5649 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5650 TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5651 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5652 TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5653 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5655 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5656 TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5657 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5658 TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
/* ofld_no_neigh and the counter following it are read as a pair. */
5659 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5664 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5665 * @adap: the adapter
5666 * @st: holds the counter values
5667 * @sleep_ok: if true we may sleep while awaiting command completion
5669 * Returns the values of TP's CPL counters.
5671 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5674 int nchan = adap->params.arch.nchan;
5676 t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5678 t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5682 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5683 * @adap: the adapter
5684 * @st: holds the counter values
5685 * @sleep_ok: if true we may sleep while awaiting command completion
5687 * Returns the values of TP's RDMA counters.
5689 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5692 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5697 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5698 * @adap: the adapter
5699 * @idx: the port index
5700 * @st: holds the counter values
5701 * @sleep_ok: if true we may sleep while awaiting command completion
5703 * Returns the values of TP's FCoE counters for the selected port.
5705 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5706 struct tp_fcoe_stats *st, bool sleep_ok)
5710 t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5713 t4_tp_mib_read(adap, &st->frames_drop, 1,
5714 TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5716 t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
5719 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5723 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5724 * @adap: the adapter
5725 * @st: holds the counter values
5726 * @sleep_ok: if true we may sleep while awaiting command completion
5728 * Returns the values of TP's counters for non-TCP directly-placed packets.
5730 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5735 t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5736 st->frames = val[0];
5738 st->octets = ((u64)val[2] << 32) | val[3];
5742 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5743 * @adap: the adapter
5744 * @mtus: where to store the MTU values
5745 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5747 * Reads the HW path MTU table.
5749 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5754 for (i = 0; i < NMTUS; ++i) {
5755 t4_write_reg(adap, TP_MTU_TABLE_A,
5756 MTUINDEX_V(0xff) | MTUVALUE_V(i));
5757 v = t4_read_reg(adap, TP_MTU_TABLE_A);
5758 mtus[i] = MTUVALUE_G(v);
5760 mtu_log[i] = MTUWIDTH_G(v);
5765 * t4_read_cong_tbl - reads the congestion control table
5766 * @adap: the adapter
5767 * @incr: where to store the alpha values
5769 * Reads the additive increments programmed into the HW congestion
5772 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5774 unsigned int mtu, w;
5776 for (mtu = 0; mtu < NMTUS; ++mtu)
5777 for (w = 0; w < NCCTRL_WIN; ++w) {
5778 t4_write_reg(adap, TP_CCTRL_TABLE_A,
5779 ROWINDEX_V(0xffff) | (mtu << 5) | w);
5780 incr[mtu][w] = (u16)t4_read_reg(adap,
5781 TP_CCTRL_TABLE_A) & 0x1fff;
5786 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5787 * @adap: the adapter
5788 * @addr: the indirect TP register address
5789 * @mask: specifies the field within the register to modify
5790 * @val: new value for the field
5792 * Sets a field of an indirect TP register to the given value.
5794 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5795 unsigned int mask, unsigned int val)
5797 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5798 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5799 t4_write_reg(adap, TP_PIO_DATA_A, val);
5803 * init_cong_ctrl - initialize congestion control parameters
5804 * @a: the alpha values for congestion control
5805 * @b: the beta values for congestion control
5807 * Initialize the congestion control parameters.
5809 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* NOTE(review): the bulk of the alpha table (a[9] onward) and the start of
 * the beta table (b[9]..b[12]) are elided from this view — confirm the
 * full tables against the complete file before editing.
 */
5811 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5836 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5839 b[13] = b[14] = b[15] = b[16] = 3;
5840 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5841 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5846 /* The minimum additive increment value for the congestion control table */
5847 #define CC_MIN_INCR 2U
5850 * t4_load_mtus - write the MTU and congestion control HW tables
5851 * @adap: the adapter
5852 * @mtus: the values for the MTU table
5853 * @alpha: the values for the congestion control alpha parameter
5854 * @beta: the values for the congestion control beta parameter
5856 * Write the HW MTU table with the supplied MTUs and the high-speed
5857 * congestion control table with the supplied alpha, beta, and MTUs.
5858 * We write the two tables together because the additive increments
5859 * depend on the MTUs.
5861 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5862 const unsigned short *alpha, const unsigned short *beta)
/* Average packet counts per congestion window, indexed by window. */
5864 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5865 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5866 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5867 28672, 40960, 57344, 81920, 114688, 163840, 229376
5872 for (i = 0; i < NMTUS; ++i) {
5873 unsigned int mtu = mtus[i];
/* Derive the MTU's bit width; the check below rounds log2 to nearest.
 * NOTE(review): the adjustment applied when the round test fails
 * (presumably `log2--;`) is elided from this view — confirm against
 * the full file.
 */
5874 unsigned int log2 = fls(mtu);
5876 if (!(mtu & ((1 << log2) >> 2))) /* round */
5878 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5879 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
5881 for (w = 0; w < NCCTRL_WIN; ++w) {
/* Increment per window: alpha-scaled payload per average packet,
 * clamped below by CC_MIN_INCR.
 */
5884 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5887 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5888 (w << 16) | (beta[w] << 13) | inc);
5893 /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5894 * clocks. The formula is
5896 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5898 * which is equivalent to
5900 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5902 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5904 u64 v = bytes256 * adap->params.vpd.cclk;
5906 return v * 62 + v / 2;
5910 * t4_get_chan_txrate - get the current per channel Tx rates
5911 * @adap: the adapter
5912 * @nic_rate: rates for NIC traffic
5913 * @ofld_rate: rates for offloaded traffic
5915 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5918 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5922 v = t4_read_reg(adap, TP_TX_TRATE_A);
5923 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5924 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5925 if (adap->params.arch.nchan == NCHAN) {
5926 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5927 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5930 v = t4_read_reg(adap, TP_TX_ORATE_A);
5931 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5932 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5933 if (adap->params.arch.nchan == NCHAN) {
5934 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5935 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5940 * t4_set_trace_filter - configure one of the tracing filters
5941 * @adap: the adapter
5942 * @tp: the desired trace filter parameters
5943 * @idx: which filter to configure
5944 * @enable: whether to enable or disable the filter
5946 * Configures one of the tracing filters available in HW. If @enable is
5947 * %0 @tp is not examined and may be %NULL. The user is responsible to
5948 * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
5950 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5951 int idx, int enable)
5953 int i, ofst = idx * 4;
5954 u32 data_reg, mask_reg, cfg;
/* Disable request: clearing the match-control register is sufficient.
 * NOTE(review): the `if (!enable)` guard and its `return 0;` are elided
 * from this view — confirm against the full file.
 */
5957 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5961 cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5962 if (cfg & TRCMULTIFILTER_F) {
5963 /* If multiple tracers are enabled, then maximum
5964 * capture size is 2.5KB (FIFO size of a single channel)
5965 * minus 2 flits for CPL_TRACE_PKT header.
5967 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5970 /* If multiple tracers are disabled, to avoid deadlocks
5971 * maximum packet capture size of 9600 bytes is recommended.
5972 * Also in this mode, only trace0 can be enabled and running.
5974 if (tp->snap_len > 9600 || idx)
/* Validate the remaining parameters against their hardware field widths. */
5978 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5979 tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5980 tp->min_len > TFMINPKTSIZE_M)
5983 /* stop the tracer we'll be changing */
5984 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
/* Program the match data and don't-care mask word by word. */
5986 idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5987 data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5988 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
5990 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5991 t4_write_reg(adap, data_reg, tp->data[i]);
5992 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5994 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5995 TFCAPTUREMAX_V(tp->snap_len) |
5996 TFMINPKTSIZE_V(tp->min_len));
/* The enable/port/invert field layout differs between T4 and T5+. */
5997 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5998 TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5999 (is_t4(adap->params.chip) ?
6000 TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
6001 T5_TFPORT_V(tp->port) | T5_TFEN_F |
6002 T5_TFINVERTMATCH_V(tp->invert)));
6008 * t4_get_trace_filter - query one of the tracing filters
6009 * @adap: the adapter
6010 * @tp: the current trace filter parameters
6011 * @idx: which trace filter to query
6012 * @enabled: non-zero if the filter is enabled
6014 * Returns the current settings of one of the HW tracing filters.
6016 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6020 int i, ofst = idx * 4;
6021 u32 data_reg, mask_reg;
6023 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
6024 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
/* The enable/port/invert field layout differs between T4 and T5+.
 * NOTE(review): the `else {` between the two branches is elided from this
 * view — confirm against the full file.
 */
6026 if (is_t4(adap->params.chip)) {
6027 *enabled = !!(ctla & TFEN_F);
6028 tp->port = TFPORT_G(ctla);
6029 tp->invert = !!(ctla & TFINVERTMATCH_F);
6031 *enabled = !!(ctla & T5_TFEN_F);
6032 tp->port = T5_TFPORT_G(ctla);
6033 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
6035 tp->snap_len = TFCAPTUREMAX_G(ctlb);
6036 tp->min_len = TFMINPKTSIZE_G(ctlb);
6037 tp->skip_ofst = TFOFFSET_G(ctla);
6038 tp->skip_len = TFLENGTH_G(ctla);
/* Read back the match data; stored masks are inverted (don't-care bits). */
6040 ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
6041 data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
6042 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
6044 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6045 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6046 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6051 * t4_pmtx_get_stats - returns the HW stats from PMTX
6052 * @adap: the adapter
6053 * @cnt: where to store the count statistics
6054 * @cycles: where to store the cycle statistics
6056 * Returns performance statistics from PMTX.
6058 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6063 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6064 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
6065 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
6066 if (is_t4(adap->params.chip)) {
6067 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
6069 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
6070 PM_TX_DBG_DATA_A, data, 2,
6071 PM_TX_DBG_STAT_MSB_A);
6072 cycles[i] = (((u64)data[0] << 32) | data[1]);
6078 * t4_pmrx_get_stats - returns the HW stats from PMRX
6079 * @adap: the adapter
6080 * @cnt: where to store the count statistics
6081 * @cycles: where to store the cycle statistics
6083 * Returns performance statistics from PMRX.
6085 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6090 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6091 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
6092 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
6093 if (is_t4(adap->params.chip)) {
6094 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
6096 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
6097 PM_RX_DBG_DATA_A, data, 2,
6098 PM_RX_DBG_STAT_MSB_A);
6099 cycles[i] = (((u64)data[0] << 32) | data[1]);
6105 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
6106 * @adapter: the adapter
6107 * @pidx: the port index
6109 * Computes and returns a bitmap indicating which MPS buffer groups are
6110 * associated with the given Port. Bit i is set if buffer group i is
6113 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
6116 unsigned int chip_version, nports;
6118 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6119 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
/* The port-to-buffer-group mapping depends on both chip generation and the
 * number of configured ports.  NOTE(review): the chip-version case labels,
 * the inner `switch (nports)` headers and the 1-port cases are elided from
 * this view — confirm against the full file.
 */
6121 switch (chip_version) {
6126 case 2: return 3 << (2 * pidx);
6127 case 4: return 1 << pidx;
6133 case 2: return 1 << (2 * pidx);
/* Unsupported combination: complain and fall through to an error return. */
6138 dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6139 chip_version, nports);
6145 * t4_get_mps_bg_map - return the buffer groups associated with a port
6146 * @adapter: the adapter
6147 * @pidx: the port index
6149 * Returns a bitmap indicating which MPS buffer groups are associated
6150 * with the given Port. Bit i is set if buffer group i is used by the
6153 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
6156 unsigned int nports;
6158 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6159 if (pidx >= nports) {
6160 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
6165 /* If we've already retrieved/computed this, just return the result.
6167 mps_bg_map = adapter->params.mps_bg_map;
6168 if (mps_bg_map[pidx])
6169 return mps_bg_map[pidx];
6171 /* Newer Firmware can tell us what the MPS Buffer Group Map is.
6172 * If we're talking to such Firmware, let it tell us. If the new
6173 * API isn't supported, revert back to old hardcoded way. The value
6174 * obtained from Firmware is encoded in below format:
6176 * val = (( MPSBGMAP[Port 3] << 24 ) |
6177 * ( MPSBGMAP[Port 2] << 16 ) |
6178 * ( MPSBGMAP[Port 1] << 8 ) |
6179 * ( MPSBGMAP[Port 0] << 0 ))
6181 if (adapter->flags & CXGB4_FW_OK) {
6185 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6186 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
6187 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6188 0, 1, &param, &val);
/* NOTE(review): the `if (!ret)` success check gating the cache-and-return
 * below is elided from this view — confirm against the full file.
 */
6192 /* Store the BG Map for all of the Ports in order to
6193 * avoid more calls to the Firmware in the future.
6195 for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
6196 mps_bg_map[p] = val & 0xff;
6198 return mps_bg_map[pidx];
6202 /* Either we're not talking to the Firmware or we're dealing with
6203 * older Firmware which doesn't support the new API to get the MPS
6204 * Buffer Group Map. Fall back to computing it ourselves.
6206 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
6207 return mps_bg_map[pidx];
6211 * t4_get_tp_e2c_map - return the E2C channel map associated with a port
6212 * @adapter: the adapter
6213 * @pidx: the port index
6215 static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6217 unsigned int nports;
6221 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6222 if (pidx >= nports) {
6223 CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n",
6228 /* FW version >= 1.16.44.0 can determine E2C channel map using
6229 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6231 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6232 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPCHMAP));
6233 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6234 0, 1, ¶m, &val);
6236 return (val >> (8 * pidx)) & 0xff;
6242 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6243 * @adap: the adapter
6244 * @pidx: the port index
6246 * Returns a bitmap indicating which TP Ingress Channels are associated
6247 * with a given Port. Bit i is set if TP Ingress Channel i is used by
6250 unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6252 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6253 unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6255 if (pidx >= nports) {
6256 dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
6261 switch (chip_version) {
6264 /* Note that this happens to be the same values as the MPS
6265 * Buffer Group Map for these Chips. But we replicate the code
6266 * here because they're really separate concepts.
6270 case 2: return 3 << (2 * pidx);
6271 case 4: return 1 << pidx;
6278 case 2: return 1 << pidx;
6283 dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6284 chip_version, nports);
6289 * t4_get_port_type_description - return Port Type string description
6290 * @port_type: firmware Port Type enumeration
/* NOTE(review): maps a firmware fw_port_type enum value to a short
 * human-readable connector/PHY name.  The string table body and the
 * out-of-range fallback return are elided in this excerpt — confirm the
 * table entries stay in sync with the fw_port_type enum in t4fw_api.h.
 */
6292 const char *t4_get_port_type_description(enum fw_port_type port_type)
6294 static const char *const port_type_description[] = {
/* Bounds check before indexing: values past the table presumably fall
 * through to a default string below — TODO confirm against full source.
 */
6320 if (port_type < ARRAY_SIZE(port_type_description))
6321 return port_type_description[port_type];
6326 * t4_get_port_stats_offset - collect port stats relative to a previous
6328 * @adap: The adapter
6330 * @stats: Current stats to fill
6331 * @offset: Previous stats snapshot
6333 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6334 struct port_stats *stats,
6335 struct port_stats *offset)
6340 t4_get_port_stats(adap, idx, stats);
6341 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6342 i < (sizeof(struct port_stats) / sizeof(u64));
6348 * t4_get_port_stats - collect port statistics
6349 * @adap: the adapter
6350 * @idx: the port index
6351 * @p: the stats structure to fill
6353 * Collect statistics related to the given port from HW.
6355 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6357 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6358 u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
6360 #define GET_STAT(name) \
6361 t4_read_reg64(adap, \
6362 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
6363 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
6364 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6366 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6367 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6368 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6369 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6370 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6371 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6372 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6373 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6374 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6375 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6376 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6377 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6378 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6379 p->tx_drop = GET_STAT(TX_PORT_DROP);
6380 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6381 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6382 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6383 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6384 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6385 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6386 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6387 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6388 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
6390 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6391 if (stat_ctl & COUNTPAUSESTATTX_F)
6392 p->tx_frames_64 -= p->tx_pause;
6393 if (stat_ctl & COUNTPAUSEMCTX_F)
6394 p->tx_mcast_frames -= p->tx_pause;
6396 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6397 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6398 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6399 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6400 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6401 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6402 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6403 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6404 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6405 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6406 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6407 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6408 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6409 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6410 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6411 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6412 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6413 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6414 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6415 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6416 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6417 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6418 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6419 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6420 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6421 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6422 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
6424 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6425 if (stat_ctl & COUNTPAUSESTATRX_F)
6426 p->rx_frames_64 -= p->rx_pause;
6427 if (stat_ctl & COUNTPAUSEMCRX_F)
6428 p->rx_mcast_frames -= p->rx_pause;
6431 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6432 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6433 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6434 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6435 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6436 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6437 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6438 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6445 * t4_get_lb_stats - collect loopback port statistics
6446 * @adap: the adapter
6447 * @idx: the loopback port index
6448 * @p: the stats structure to fill
6450 * Return HW statistics for the given loopback port.
6452 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6454 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6456 #define GET_STAT(name) \
6457 t4_read_reg64(adap, \
6458 (is_t4(adap->params.chip) ? \
6459 PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
6460 T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
6461 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6463 p->octets = GET_STAT(BYTES);
6464 p->frames = GET_STAT(FRAMES);
6465 p->bcast_frames = GET_STAT(BCAST);
6466 p->mcast_frames = GET_STAT(MCAST);
6467 p->ucast_frames = GET_STAT(UCAST);
6468 p->error_frames = GET_STAT(ERROR);
6470 p->frames_64 = GET_STAT(64B);
6471 p->frames_65_127 = GET_STAT(65B_127B);
6472 p->frames_128_255 = GET_STAT(128B_255B);
6473 p->frames_256_511 = GET_STAT(256B_511B);
6474 p->frames_512_1023 = GET_STAT(512B_1023B);
6475 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6476 p->frames_1519_max = GET_STAT(1519B_MAX);
6477 p->drop = GET_STAT(DROP_FRAMES);
6479 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6480 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6481 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6482 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6483 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6484 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6485 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6486 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6492 /* t4_mk_filtdelwr - create a delete filter WR
6493 * @ftid: the filter ID
6494 * @wr: the filter work request to populate
6495 * @qid: ingress queue to receive the delete notification
6497 * Creates a filter work request to delete the supplied filter. If @qid is
6498 * negative the delete notification is suppressed.
6500 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6502 memset(wr, 0, sizeof(*wr));
6503 wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6504 wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6505 wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6506 FW_FILTER_WR_NOREPLY_V(qid < 0));
6507 wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
6509 wr->rx_chan_rx_rpl_iq =
6510 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
/* Initialize the common op/request/length header of a firmware command
 * structure: @cmd selects the FW_*_CMD opcode, @rd_wr is READ or WRITE.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6520 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6524 struct fw_ldst_cmd c;
6526 memset(&c, 0, sizeof(c));
6527 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
6528 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6532 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6533 c.u.addrval.addr = cpu_to_be32(addr);
6534 c.u.addrval.val = cpu_to_be32(val);
6536 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6540 * t4_mdio_rd - read a PHY register through MDIO
6541 * @adap: the adapter
6542 * @mbox: mailbox to use for the FW command
6543 * @phy_addr: the PHY address
6544 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6545 * @reg: the register to read
6546 * @valp: where to store the value
6548 * Issues a FW command through the given mailbox to read a PHY register.
6550 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6551 unsigned int mmd, unsigned int reg, u16 *valp)
6555 struct fw_ldst_cmd c;
6557 memset(&c, 0, sizeof(c));
6558 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6559 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6560 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6562 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6563 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6564 FW_LDST_CMD_MMD_V(mmd));
6565 c.u.mdio.raddr = cpu_to_be16(reg);
6567 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6569 *valp = be16_to_cpu(c.u.mdio.rval);
6574 * t4_mdio_wr - write a PHY register through MDIO
6575 * @adap: the adapter
6576 * @mbox: mailbox to use for the FW command
6577 * @phy_addr: the PHY address
6578 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6579 * @reg: the register to write
6580 * @val: value to write
6582 * Issues a FW command through the given mailbox to write a PHY register.
6584 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6585 unsigned int mmd, unsigned int reg, u16 val)
6588 struct fw_ldst_cmd c;
6590 memset(&c, 0, sizeof(c));
6591 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6592 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6593 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6595 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6596 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6597 FW_LDST_CMD_MMD_V(mmd));
6598 c.u.mdio.raddr = cpu_to_be16(reg);
6599 c.u.mdio.rval = cpu_to_be16(val);
6601 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6605 * t4_sge_decode_idma_state - decode the idma state
6606 * @adapter: the adapter
6607 * @state: the state idma is stuck in
/* NOTE(review): diagnostic helper — picks a chip-specific table of IDMA
 * state names, logs the symbolic name for @state (or "unknown"), then
 * dumps three SGE debug registers.  Several table entries and the switch
 * case labels are elided in this excerpt; keep the tables in lock-step
 * with the hardware IDMA state encodings when editing.
 */
6609 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
/* State-name table for T4 silicon; index == hardware IDMA state value. */
6611 static const char * const t4_decode[] = {
6613 "IDMA_PUSH_MORE_CPL_FIFO",
6614 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6616 "IDMA_PHYSADDR_SEND_PCIEHDR",
6617 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6618 "IDMA_PHYSADDR_SEND_PAYLOAD",
6619 "IDMA_SEND_FIFO_TO_IMSG",
6620 "IDMA_FL_REQ_DATA_FL_PREP",
6621 "IDMA_FL_REQ_DATA_FL",
6623 "IDMA_FL_H_REQ_HEADER_FL",
6624 "IDMA_FL_H_SEND_PCIEHDR",
6625 "IDMA_FL_H_PUSH_CPL_FIFO",
6626 "IDMA_FL_H_SEND_CPL",
6627 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6628 "IDMA_FL_H_SEND_IP_HDR",
6629 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6630 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6631 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6632 "IDMA_FL_D_SEND_PCIEHDR",
6633 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6634 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6635 "IDMA_FL_SEND_PCIEHDR",
6636 "IDMA_FL_PUSH_CPL_FIFO",
6638 "IDMA_FL_SEND_PAYLOAD_FIRST",
6639 "IDMA_FL_SEND_PAYLOAD",
6640 "IDMA_FL_REQ_NEXT_DATA_FL",
6641 "IDMA_FL_SEND_NEXT_PCIEHDR",
6642 "IDMA_FL_SEND_PADDING",
6643 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6644 "IDMA_FL_SEND_FIFO_TO_IMSG",
6645 "IDMA_FL_REQ_DATAFL_DONE",
6646 "IDMA_FL_REQ_HEADERFL_DONE",
/* State-name table for T5 silicon. */
6648 static const char * const t5_decode[] = {
6651 "IDMA_PUSH_MORE_CPL_FIFO",
6652 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6653 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6654 "IDMA_PHYSADDR_SEND_PCIEHDR",
6655 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6656 "IDMA_PHYSADDR_SEND_PAYLOAD",
6657 "IDMA_SEND_FIFO_TO_IMSG",
6658 "IDMA_FL_REQ_DATA_FL",
6660 "IDMA_FL_DROP_SEND_INC",
6661 "IDMA_FL_H_REQ_HEADER_FL",
6662 "IDMA_FL_H_SEND_PCIEHDR",
6663 "IDMA_FL_H_PUSH_CPL_FIFO",
6664 "IDMA_FL_H_SEND_CPL",
6665 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6666 "IDMA_FL_H_SEND_IP_HDR",
6667 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6668 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6669 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6670 "IDMA_FL_D_SEND_PCIEHDR",
6671 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6672 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6673 "IDMA_FL_SEND_PCIEHDR",
6674 "IDMA_FL_PUSH_CPL_FIFO",
6676 "IDMA_FL_SEND_PAYLOAD_FIRST",
6677 "IDMA_FL_SEND_PAYLOAD",
6678 "IDMA_FL_REQ_NEXT_DATA_FL",
6679 "IDMA_FL_SEND_NEXT_PCIEHDR",
6680 "IDMA_FL_SEND_PADDING",
6681 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
/* State-name table for T6 silicon. */
6683 static const char * const t6_decode[] = {
6685 "IDMA_PUSH_MORE_CPL_FIFO",
6686 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6687 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6688 "IDMA_PHYSADDR_SEND_PCIEHDR",
6689 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6690 "IDMA_PHYSADDR_SEND_PAYLOAD",
6691 "IDMA_FL_REQ_DATA_FL",
6693 "IDMA_FL_DROP_SEND_INC",
6694 "IDMA_FL_H_REQ_HEADER_FL",
6695 "IDMA_FL_H_SEND_PCIEHDR",
6696 "IDMA_FL_H_PUSH_CPL_FIFO",
6697 "IDMA_FL_H_SEND_CPL",
6698 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6699 "IDMA_FL_H_SEND_IP_HDR",
6700 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6701 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6702 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6703 "IDMA_FL_D_SEND_PCIEHDR",
6704 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6705 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6706 "IDMA_FL_SEND_PCIEHDR",
6707 "IDMA_FL_PUSH_CPL_FIFO",
6709 "IDMA_FL_SEND_PAYLOAD_FIRST",
6710 "IDMA_FL_SEND_PAYLOAD",
6711 "IDMA_FL_REQ_NEXT_DATA_FL",
6712 "IDMA_FL_SEND_NEXT_PCIEHDR",
6713 "IDMA_FL_SEND_PADDING",
6714 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
/* SGE debug registers dumped alongside the decoded state. */
6716 static const u32 sge_regs[] = {
6717 SGE_DEBUG_DATA_LOW_INDEX_2_A,
6718 SGE_DEBUG_DATA_LOW_INDEX_3_A,
6719 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
6721 const char **sge_idma_decode;
6722 int sge_idma_decode_nstates;
6724 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6726 /* Select the right set of decode strings to dump depending on the
6727 * adapter chip type.
6729 switch (chip_version) {
6731 sge_idma_decode = (const char **)t4_decode;
6732 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6736 sge_idma_decode = (const char **)t5_decode;
6737 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6741 sge_idma_decode = (const char **)t6_decode;
6742 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6746 dev_err(adapter->pdev_dev,
6747 "Unsupported chip version %d\n", chip_version);
/* NOTE(review): the is_t4() fallback below appears to duplicate the
 * switch above — presumably legacy selection code kept for the default
 * path; confirm against the full source before removing.
 */
6751 if (is_t4(adapter->params.chip)) {
6752 sge_idma_decode = (const char **)t4_decode;
6753 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6755 sge_idma_decode = (const char **)t5_decode;
6756 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
/* Log the symbolic state name when in range, else the raw number. */
6759 if (state < sge_idma_decode_nstates)
6760 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6762 CH_WARN(adapter, "idma state %d unknown\n", state);
6764 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6765 CH_WARN(adapter, "SGE register %#x value %#x\n",
6766 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6770 * t4_sge_ctxt_flush - flush the SGE context cache
6771 * @adap: the adapter
6772 * @mbox: mailbox to use for the FW command
6773 * @ctxt_type: Egress or Ingress
6775 * Issues a FW command through the given mailbox to flush the
6776 * SGE context cache.
6778 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
6782 struct fw_ldst_cmd c;
6784 memset(&c, 0, sizeof(c));
6785 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
6786 FW_LDST_ADDRSPC_SGE_EGRC :
6787 FW_LDST_ADDRSPC_SGE_INGC);
6788 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6789 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6791 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6792 c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6794 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6799 * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
6800 * @adap: the adapter
6801 * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
6802 * @dbqtimers: SGE Doorbell Queue Timer table
6804 * Reads the SGE Doorbell Queue Timer values into the provided table.
6805 * Returns 0 on success (Firmware and Hardware support this feature),
6806 * an error on failure.
6808 int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
6811 int ret, dbqtimerix;
6815 while (dbqtimerix < ndbqtimers) {
6817 u32 params[7], vals[7];
6819 nparams = ndbqtimers - dbqtimerix;
6820 if (nparams > ARRAY_SIZE(params))
6821 nparams = ARRAY_SIZE(params);
6823 for (param = 0; param < nparams; param++)
6825 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6826 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
6827 FW_PARAMS_PARAM_Y_V(dbqtimerix + param));
6828 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
6829 nparams, params, vals);
6833 for (param = 0; param < nparams; param++)
6834 dbqtimers[dbqtimerix++] = vals[param];
6840 * t4_fw_hello - establish communication with FW
6841 * @adap: the adapter
6842 * @mbox: mailbox to use for the FW command
6843 * @evt_mbox: mailbox to receive async FW events
6844 * @master: specifies the caller's willingness to be the device master
6845 * @state: returns the current device state (if non-NULL)
6847 * Issues a command to establish communication with FW. Returns either
6848 * an error (negative integer) or the mailbox of the Master PF.
/* NOTE(review): complex retry/wait protocol — the retry label, loop
 * bodies and several closing braces are elided in this excerpt; do not
 * restructure without the full source in view.
 */
6850 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6851 enum dev_master master, enum dev_state *state)
6854 struct fw_hello_cmd c;
6856 unsigned int master_mbox;
6857 int retries = FW_CMD_HELLO_RETRIES;
6860 memset(&c, 0, sizeof(c));
6861 INIT_CMD(c, HELLO, WRITE);
/* Encode mastership desire, async-event mailbox and a CLEARINIT request
 * into the single err_to_clearinit word of the HELLO command.
 */
6862 c.err_to_clearinit = cpu_to_be32(
6863 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
6864 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
6865 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
6866 mbox : FW_HELLO_CMD_MBMASTER_M) |
6867 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
6868 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
6869 FW_HELLO_CMD_CLEARINIT_F);
6872 * Issue the HELLO command to the firmware. If it's not successful
6873 * but indicates that we got a "busy" or "timeout" condition, retry
6874 * the HELLO until we exhaust our retry limit. If we do exceed our
6875 * retry limit, check to see if the firmware left us any error
6876 * information and report that if so.
6878 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6880 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6882 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
6883 t4_report_fw_error(adap);
/* HELLO succeeded: firmware echoes back the Master PF and device state. */
6887 v = be32_to_cpu(c.err_to_clearinit);
6888 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
6890 if (v & FW_HELLO_CMD_ERR_F)
6891 *state = DEV_STATE_ERR;
6892 else if (v & FW_HELLO_CMD_INIT_F)
6893 *state = DEV_STATE_INIT;
6895 *state = DEV_STATE_UNINIT;
6899 * If we're not the Master PF then we need to wait around for the
6900 * Master PF Driver to finish setting up the adapter.
6902 * Note that we also do this wait if we're a non-Master-capable PF and
6903 * there is no current Master PF; a Master PF may show up momentarily
6904 * and we wouldn't want to fail pointlessly. (This can happen when an
6905 * OS loads lots of different drivers rapidly at the same time). In
6906 * this case, the Master PF returned by the firmware will be
6907 * PCIE_FW_MASTER_M so the test below will work ...
6909 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
6910 master_mbox != mbox) {
6911 int waiting = FW_CMD_HELLO_TIMEOUT;
6914 * Wait for the firmware to either indicate an error or
6915 * initialized state. If we see either of these we bail out
6916 * and report the issue to the caller. If we exhaust the
6917 * "hello timeout" and we haven't exhausted our retries, try
6918 * again. Otherwise bail with a timeout error.
6927 * If neither Error nor Initialized are indicated
6928 * by the firmware keep waiting till we exhaust our
6929 * timeout ... and then retry if we haven't exhausted
6932 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
6933 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
6944 * We either have an Error or Initialized condition
6945 * report errors preferentially.
6948 if (pcie_fw & PCIE_FW_ERR_F)
6949 *state = DEV_STATE_ERR;
6950 else if (pcie_fw & PCIE_FW_INIT_F)
6951 *state = DEV_STATE_INIT;
6955 * If we arrived before a Master PF was selected and
6956 * there's not a valid Master PF, grab its identity
6959 if (master_mbox == PCIE_FW_MASTER_M &&
6960 (pcie_fw & PCIE_FW_MASTER_VLD_F))
6961 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
6970 * t4_fw_bye - end communication with FW
6971 * @adap: the adapter
6972 * @mbox: mailbox to use for the FW command
6974 * Issues a command to terminate communication with FW.
6976 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6978 struct fw_bye_cmd c;
6980 memset(&c, 0, sizeof(c));
6981 INIT_CMD(c, BYE, WRITE);
6982 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6986 * t4_init_cmd - ask FW to initialize the device
6987 * @adap: the adapter
6988 * @mbox: mailbox to use for the FW command
6990 * Issues a command to FW to partially initialize the device. This
6991 * performs initialization that generally doesn't depend on user input.
6993 int t4_early_init(struct adapter *adap, unsigned int mbox)
6995 struct fw_initialize_cmd c;
6997 memset(&c, 0, sizeof(c));
6998 INIT_CMD(c, INITIALIZE, WRITE);
6999 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7003 * t4_fw_reset - issue a reset to FW
7004 * @adap: the adapter
7005 * @mbox: mailbox to use for the FW command
7006 * @reset: specifies the type of reset to perform
7008 * Issues a reset command of the specified type to FW.
7010 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7012 struct fw_reset_cmd c;
7014 memset(&c, 0, sizeof(c));
7015 INIT_CMD(c, RESET, WRITE);
7016 c.val = cpu_to_be32(reset);
7017 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7021 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7022 * @adap: the adapter
7023 * @mbox: mailbox to use for the FW RESET command (if desired)
7024 * @force: force uP into RESET even if FW RESET command fails
7026 * Issues a RESET command to firmware (if desired) with a HALT indication
7027 * and then puts the microprocessor into RESET state. The RESET command
7028 * will only be issued if a legitimate mailbox is provided (mbox <=
7029 * PCIE_FW_MASTER_M).
7031 * This is generally used in order for the host to safely manipulate the
7032 * adapter without fear of conflicting with whatever the firmware might
7033 * be doing. The only way out of this state is to RESTART the firmware
7036 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7041 * If a legitimate mailbox is provided, issue a RESET command
7042 * with a HALT indication.
7044 if (mbox <= PCIE_FW_MASTER_M) {
7045 struct fw_reset_cmd c;
7047 memset(&c, 0, sizeof(c));
7048 INIT_CMD(c, RESET, WRITE);
7049 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
7050 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
7051 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7055 * Normally we won't complete the operation if the firmware RESET
7056 * command fails but if our caller insists we'll go ahead and put the
7057 * uP into RESET. This can be useful if the firmware is hung or even
7058 * missing ... We'll have to take the risk of putting the uP into
7059 * RESET without the cooperation of firmware in that case.
7061 * We also force the firmware's HALT flag to be on in case we bypassed
7062 * the firmware RESET command above or we're dealing with old firmware
7063 * which doesn't have the HALT capability. This will serve as a flag
7064 * for the incoming firmware to know that it's coming out of a HALT
7065 * rather than a RESET ... if it's new enough to understand that ...
7067 if (ret == 0 || force) {
7068 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
7069 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
7074 * And we always return the result of the firmware RESET command
7075 * even when we force the uP into RESET ...
7081 * t4_fw_restart - restart the firmware by taking the uP out of RESET
7082 * @adap: the adapter
7083 * @mbox: mailbox to use for the FW command
7084 * @reset: if we want to do a RESET to restart things
7086 * Restart firmware previously halted by t4_fw_halt(). On successful
7087 * return the previous PF Master remains as the new PF Master and there
7088 * is no need to issue a new HELLO command, etc.
7090 * We do this in two ways:
7092 * 1. If we're dealing with newer firmware we'll simply want to take
7093 * the chip's microprocessor out of RESET. This will cause the
7094 * firmware to start up from its start vector. And then we'll loop
7095 * until the firmware indicates it's started again (PCIE_FW.HALT
7096 * reset to 0) or we timeout.
7098 * 2. If we're dealing with older firmware then we'll need to RESET
7099 * the chip since older firmware won't recognize the PCIE_FW.HALT
7100 * flag and automatically RESET itself on startup.
7102 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
7106 * Since we're directing the RESET instead of the firmware
7107 * doing it automatically, we need to clear the PCIE_FW.HALT
7110 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
7113 * If we've been given a valid mailbox, first try to get the
7114 * firmware to do the RESET. If that works, great and we can
7115 * return success. Otherwise, if we haven't been given a
7116 * valid mailbox or the RESET command failed, fall back to
7117 * hitting the chip with a hammer.
7119 if (mbox <= PCIE_FW_MASTER_M) {
7120 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
7122 if (t4_fw_reset(adap, mbox,
7123 PIORST_F | PIORSTMODE_F) == 0)
7127 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
7132 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
7133 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7134 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
7145 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7146 * @adap: the adapter
7147 * @mbox: mailbox to use for the FW RESET command (if desired)
7148 * @fw_data: the firmware image to write
7150 * @force: force upgrade even if firmware doesn't cooperate
7152 * Perform all of the steps necessary for upgrading an adapter's
7153 * firmware image. Normally this requires the cooperation of the
7154 * existing firmware in order to halt all existing activities
7155 * but if an invalid mailbox token is passed in we skip that step
7156 * (though we'll still put the adapter microprocessor into RESET in
7159 * On successful return the new firmware will have been loaded and
7160 * the adapter will have been fully RESET losing all previous setup
7161 * state. On unsuccessful return the adapter may be completely hosed ...
7162 * positive errno indicates that the adapter is ~probably~ intact, a
7163 * negative errno indicates that things are looking bad ...
/* NOTE(review): several original lines (braces, early-return error paths,
 * local declarations of "ret"/"reset") are elided in this extract; only
 * comments have been added below.
 */
7165 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7166 const u8 *fw_data, unsigned int size, int force)
7168 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
/* Refuse to flash an image whose header doesn't match this chip. */
7171 if (!t4_fw_matches_chip(adap, fw_hdr))
7174 /* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
7175 * set wont be sent when we are flashing FW.
7177 adap->flags &= ~CXGB4_FW_OK;
/* Halt the running firmware first; abort on failure unless @force. */
7179 ret = t4_fw_halt(adap, mbox, force);
7180 if (ret < 0 && !force)
/* Burn the new image into FLASH. */
7183 ret = t4_load_fw(adap, fw_data, size);
7188 * If there was a Firmware Configuration File stored in FLASH,
7189 * there's a good chance that it won't be compatible with the new
7190 * Firmware. In order to prevent difficult to diagnose adapter
7191 * initialization issues, we clear out the Firmware Configuration File
7192 * portion of the FLASH . The user will need to re-FLASH a new
7193 * Firmware Configuration File which is compatible with the new
7194 * Firmware if that's desired.
/* Best effort: result of clearing the config region is ignored. */
7196 (void)t4_load_cfg(adap, NULL, 0);
7199 * Older versions of the firmware don't understand the new
7200 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
7201 * restart. So for newly loaded older firmware we'll have to do the
7202 * RESET for it so it starts up on a clean slate. We can tell if
7203 * the newly loaded firmware will handle this right by checking
7204 * its header flags to see if it advertises the capability.
/* reset == true when the image does NOT advertise RESET_HALT support. */
7206 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
7207 ret = t4_fw_restart(adap, mbox, reset);
7209 /* Grab potentially new Firmware Device Log parameters so we can see
7210 * how healthy the new Firmware is. It's okay to contact the new
7211 * Firmware for these parameters even though, as far as it's
7212 * concerned, we've never said "HELLO" to it ...
7214 (void)t4_init_devlog_params(adap);
/* Flashing done: allow normal FW-dependent mailbox traffic again. */
7216 adap->flags |= CXGB4_FW_OK;
7221 * t4_fl_pkt_align - return the fl packet alignment
7222 * @adap: the adapter
7224 * T4 has a single field to specify the packing and padding boundary.
7225 * T5 onwards has separate fields for this and hence the alignment for
7226 * next packet offset is maximum of these two.
7229 int t4_fl_pkt_align(struct adapter *adap)
7231 u32 sge_control, sge_control2;
7232 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7234 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
7236 /* T4 uses a single control field to specify both the PCIe Padding and
7237 * Packing Boundary. T5 introduced the ability to specify these
7238 * separately. The actual Ingress Packet Data alignment boundary
7239 * within Packed Buffer Mode is the maximum of these two
7240 * specifications. (Note that it makes no real practical sense to
7241 * have the Padding Boundary be larger than the Packing Boundary but you
7242 * could set the chip up that way and, in fact, legacy T4 code would
7243 * end doing this because it would initialize the Padding Boundary and
7244 * leave the Packing Boundary initialized to 0 (16 bytes).)
7245 * Padding Boundary values in T6 starts from 8B,
7246 * where as it is 32B for T4 and T5.
/* Pick the decode shift matching the chip's padding-boundary encoding. */
7248 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7249 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
7251 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
/* Padding boundary in bytes: 2^(field + shift). */
7253 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
7255 fl_align = ingpadboundary;
7256 if (!is_t4(adap->params.chip)) {
7257 /* T5 has a weird interpretation of one of the PCIe Packing
7258 * Boundary values. No idea why ...
/* T5+: packing boundary lives in a separate register; field value
 * 0 (INGPACKBOUNDARY_16B_X) means 16 bytes, otherwise 2^(field+shift).
 */
7260 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
7261 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
7262 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
7263 ingpackboundary = 16;
7265 ingpackboundary = 1 << (ingpackboundary +
7266 INGPACKBOUNDARY_SHIFT_X);
/* Effective FL packet alignment is the larger of the two boundaries. */
7268 fl_align = max(ingpadboundary, ingpackboundary);
7274 * t4_fixup_host_params - fix up host-dependent parameters
7275 * @adap: the adapter
7276 * @page_size: the host's Base Page Size
7277 * @cache_line_size: the host's Cache Line Size
7279 * Various registers in T4 contain values which are dependent on the
7280 * host's Base Page and Cache Line Sizes. This function will fix all of
7281 * those registers with the appropriate values as passed in ...
/* NOTE(review): some lines (braces, the "else" of the is_t4() test, the
 * devctl declaration and the mps computation from mps_log) are elided in
 * this extract; only comments have been added.
 */
7283 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7284 unsigned int cache_line_size)
7286 unsigned int page_shift = fls(page_size) - 1;
/* SGE Host Page Size registers encode the page size as log2(size) - 10. */
7287 unsigned int sge_hps = page_shift - 10;
/* Egress status page is 128B when cache lines exceed 64B, else 64B. */
7288 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7289 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7290 unsigned int fl_align_log = fls(fl_align) - 1;
/* Program the same host page size for all eight PFs. */
7292 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
7293 HOSTPAGESIZEPF0_V(sge_hps) |
7294 HOSTPAGESIZEPF1_V(sge_hps) |
7295 HOSTPAGESIZEPF2_V(sge_hps) |
7296 HOSTPAGESIZEPF3_V(sge_hps) |
7297 HOSTPAGESIZEPF4_V(sge_hps) |
7298 HOSTPAGESIZEPF5_V(sge_hps) |
7299 HOSTPAGESIZEPF6_V(sge_hps) |
7300 HOSTPAGESIZEPF7_V(sge_hps));
/* T4: one combined padding/packing boundary derived from fl_align. */
7302 if (is_t4(adap->params.chip)) {
7303 t4_set_reg_field(adap, SGE_CONTROL_A,
7304 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7305 EGRSTATUSPAGESIZE_F,
7306 INGPADBOUNDARY_V(fl_align_log -
7307 INGPADBOUNDARY_SHIFT_X) |
7308 EGRSTATUSPAGESIZE_V(stat_len != 64));
7310 unsigned int pack_align;
7311 unsigned int ingpad, ingpack;
7313 /* T5 introduced the separation of the Free List Padding and
7314 * Packing Boundaries. Thus, we can select a smaller Padding
7315 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7316 * Bandwidth, and use a Packing Boundary which is large enough
7317 * to avoid false sharing between CPUs, etc.
7319 * For the PCI Link, the smaller the Padding Boundary the
7320 * better. For the Memory Controller, a smaller Padding
7321 * Boundary is better until we cross under the Memory Line
7322 * Size (the minimum unit of transfer to/from Memory). If we
7323 * have a Padding Boundary which is smaller than the Memory
7324 * Line Size, that'll involve a Read-Modify-Write cycle on the
7325 * Memory Controller which is never good.
7328 /* We want the Packing Boundary to be based on the Cache Line
7329 * Size in order to help avoid False Sharing performance
7330 * issues between CPUs, etc. We also want the Packing
7331 * Boundary to incorporate the PCI-E Maximum Payload Size. We
7332 * get best performance when the Packing Boundary is a
7333 * multiple of the Maximum Payload Size.
7335 pack_align = fl_align;
7336 if (pci_is_pcie(adap->pdev)) {
7337 unsigned int mps, mps_log;
7340 /* The PCIe Device Control Maximum Payload Size field
7341 * [bits 7:5] encodes sizes as powers of 2 starting at
/* MPS in bytes is 2^mps_log; the "+ 7" reflects the 128B (2^7) base
 * encoding of the DEVCTL Max_Payload_Size field.
 */
7344 pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
7346 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
/* Grow the packing boundary up to the Maximum Payload Size. */
7348 if (mps > pack_align)
7352 /* N.B. T5/T6 have a crazy special interpretation of the "0"
7353 * value for the Packing Boundary. This corresponds to 16
7354 * bytes instead of the expected 32 bytes. So if we want 32
7355 * bytes, the best we can really do is 64 bytes ...
7357 if (pack_align <= 16) {
7358 ingpack = INGPACKBOUNDARY_16B_X;
7360 } else if (pack_align == 32) {
7361 ingpack = INGPACKBOUNDARY_64B_X;
7364 unsigned int pack_align_log = fls(pack_align) - 1;
7366 ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
7367 fl_align = pack_align;
7370 /* Use the smallest Ingress Padding which isn't smaller than
7371 * the Memory Controller Read/Write Size. We'll take that as
7372 * being 8 bytes since we don't know of any system with a
7373 * wider Memory Controller Bus Width.
7375 if (is_t5(adap->params.chip))
7376 ingpad = INGPADBOUNDARY_32B_X;
7378 ingpad = T6_INGPADBOUNDARY_8B_X;
7380 t4_set_reg_field(adap, SGE_CONTROL_A,
7381 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7382 EGRSTATUSPAGESIZE_F,
7383 INGPADBOUNDARY_V(ingpad) |
7384 EGRSTATUSPAGESIZE_V(stat_len != 64));
7385 t4_set_reg_field(adap, SGE_CONTROL2_A,
7386 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
7387 INGPACKBOUNDARY_V(ingpack));
7390 * Adjust various SGE Free List Host Buffer Sizes.
7392 * This is something of a crock since we're using fixed indices into
7393 * the array which are also known by the sge.c code and the T4
7394 * Firmware Configuration File. We need to come up with a much better
7395 * approach to managing this array. For now, the first four entries
7400 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7401 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7403 * For the single-MTU buffers in unpacked mode we need to include
7404 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7405 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7406 * Padding boundary. All of these are accommodated in the Factory
7407 * Default Firmware Configuration File but we need to adjust it for
7408 * this host's cache line size.
7410 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
/* Round the existing buffer sizes up to the fl_align boundary. */
7411 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
7412 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
7414 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
7415 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
/* TDDP page size: encoded relative to a 4KB (2^12) base. */
7418 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
7424 * t4_fw_initialize - ask FW to initialize the device
7425 * @adap: the adapter
7426 * @mbox: mailbox to use for the FW command
7428 * Issues a command to FW to partially initialize the device. This
7429 * performs initialization that generally doesn't depend on user input.
7431 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7433 struct fw_initialize_cmd c;
/* Build a zeroed INITIALIZE(WRITE) command and issue it synchronously;
 * the return value is the mailbox command status.
 */
7435 memset(&c, 0, sizeof(c));
7436 INIT_CMD(c, INITIALIZE, WRITE);
7437 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7441 * t4_query_params_rw - query FW or device parameters
7442 * @adap: the adapter
7443 * @mbox: mailbox to use for the FW command
7446 * @nparams: the number of parameters
7447 * @params: the parameter names
7448 * @val: the parameter values
7449 * @rw: Write and read flag
7450 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
7452 * Reads the value of FW or device parameters. Up to 7 parameters can be
7455 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7456 unsigned int vf, unsigned int nparams, const u32 *params,
7457 u32 *val, int rw, bool sleep_ok)
7460 struct fw_params_cmd c;
/* p walks the command's (mnem, val) pairs; start at the first mnem. */
7461 __be32 *p = &c.param[0].mnem;
7466 memset(&c, 0, sizeof(c));
7467 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7468 FW_CMD_REQUEST_F | FW_CMD_READ_F |
7469 FW_PARAMS_CMD_PFN_V(pf) |
7470 FW_PARAMS_CMD_VFN_V(vf));
7471 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7473 for (i = 0; i < nparams; i++) {
7474 *p++ = cpu_to_be32(*params++);
/* NOTE(review): the line seeding the val slot appears to be guarded by
 * @rw in the full source (write-then-read mode) — guard elided here.
 */
7476 *p = cpu_to_be32(*(val + i));
7480 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
/* Read back results: values sit at every other __be32 (p += 2 skips
 * over each parameter's mnem word).
 */
7482 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7483 *val++ = be32_to_cpu(*p);
/* Sleeping variant of the parameter query: rw == 0, and the elided final
 * argument is presumably sleep_ok == true — trailing line not visible here.
 */
7487 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7488 unsigned int vf, unsigned int nparams, const u32 *params,
7491 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
/* Non-sleeping ("_ns") variant: rw == 0, and the elided final argument is
 * presumably sleep_ok == false — trailing line not visible here.
 */
7495 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7496 unsigned int vf, unsigned int nparams, const u32 *params,
7499 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7504 * t4_set_params_timeout - sets FW or device parameters
7505 * @adap: the adapter
7506 * @mbox: mailbox to use for the FW command
7509 * @nparams: the number of parameters
7510 * @params: the parameter names
7511 * @val: the parameter values
7512 * @timeout: the timeout time
7514 * Sets the value of FW or device parameters. Up to 7 parameters can be
7515 * specified at once.
7517 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7518 unsigned int pf, unsigned int vf,
7519 unsigned int nparams, const u32 *params,
7520 const u32 *val, int timeout)
7522 struct fw_params_cmd c;
/* p walks the command's (mnem, val) pairs; start at the first mnem. */
7523 __be32 *p = &c.param[0].mnem;
7528 memset(&c, 0, sizeof(c));
7529 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7530 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7531 FW_PARAMS_CMD_PFN_V(pf) |
7532 FW_PARAMS_CMD_VFN_V(vf));
7533 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Pack each parameter name followed immediately by its new value. */
7536 *p++ = cpu_to_be32(*params++);
7537 *p++ = cpu_to_be32(*val++);
7540 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7544 * t4_set_params - sets FW or device parameters
7545 * @adap: the adapter
7546 * @mbox: mailbox to use for the FW command
7549 * @nparams: the number of parameters
7550 * @params: the parameter names
7551 * @val: the parameter values
7553 * Sets the value of FW or device parameters. Up to 7 parameters can be
7554 * specified at once.
/* Convenience wrapper: set parameters using the default FW command timeout. */
7556 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7557 unsigned int vf, unsigned int nparams, const u32 *params,
7560 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7561 FW_CMD_MAX_TIMEOUT);
7565 * t4_cfg_pfvf - configure PF/VF resource limits
7566 * @adap: the adapter
7567 * @mbox: mailbox to use for the FW command
7568 * @pf: the PF being configured
7569 * @vf: the VF being configured
7570 * @txq: the max number of egress queues
7571 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7572 * @rxqi: the max number of interrupt-capable ingress queues
7573 * @rxq: the max number of interruptless ingress queues
7574 * @tc: the PCI traffic class
7575 * @vi: the max number of virtual interfaces
7576 * @cmask: the channel access rights mask for the PF/VF
7577 * @pmask: the port access rights mask for the PF/VF
7578 * @nexact: the maximum number of exact MPS filters
7579 * @rcaps: read capabilities
7580 * @wxcaps: write/execute capabilities
7582 * Configures resource limits and capabilities for a physical or virtual
7585 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7586 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7587 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7588 unsigned int vi, unsigned int cmask, unsigned int pmask,
7589 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7591 struct fw_pfvf_cmd c;
/* Build a single PFVF WRITE command carrying all resource limits and
 * capability masks, then issue it synchronously.
 */
7593 memset(&c, 0, sizeof(c));
7594 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7595 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7596 FW_PFVF_CMD_VFN_V(vf));
7597 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Ingress queue limits: interrupt-capable (rxqi) and interruptless (rxq). */
7598 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7599 FW_PFVF_CMD_NIQ_V(rxq));
/* Channel/port access masks plus the egress queue limit. */
7600 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7601 FW_PFVF_CMD_PMASK_V(pmask) |
7602 FW_PFVF_CMD_NEQ_V(txq));
7603 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7604 FW_PFVF_CMD_NVI_V(vi) |
7605 FW_PFVF_CMD_NEXACTF_V(nexact));
7606 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7607 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7608 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7609 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7613 * t4_alloc_vi - allocate a virtual interface
7614 * @adap: the adapter
7615 * @mbox: mailbox to use for the FW command
7616 * @port: physical port associated with the VI
7617 * @pf: the PF owning the VI
7618 * @vf: the VF owning the VI
7619 * @nmac: number of MAC addresses needed (1 to 5)
7620 * @mac: the MAC addresses of the VI
7621 * @rss_size: size of RSS table slice associated with this VI
7622 * @vivld: the destination to store the VI Valid value.
7623 * @vin: the destination to store the VIN value.
7625 * Allocates a virtual interface for the given physical port. If @mac is
7626 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7627 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7628 * stored consecutively so the space needed is @nmac * 6 bytes.
7629 * Returns a negative error number or the non-negative VI id.
/* NOTE(review): braces, the nmac field setup and the switch(nmac) case
 * labels around the staggered memcpy()s are elided in this extract.
 */
7631 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7632 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7633 unsigned int *rss_size, u8 *vivld, u8 *vin)
7638 memset(&c, 0, sizeof(c));
7639 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7640 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7641 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7642 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
7643 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
7646 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Copy out the primary MAC and any extra FW-assigned addresses; the
 * descending offsets (24, 18, 12, 6) are consistent with a fall-through
 * switch on nmac — case labels not visible here.
 */
7651 memcpy(mac, c.mac, sizeof(c.mac));
7654 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7657 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7660 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7663 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
/* Optional out-parameters: RSS slice size, VI-valid flag and VIN. */
7667 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7670 *vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
7673 *vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
/* Success: return the newly allocated VI id. */
7675 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
7679 * t4_free_vi - free a virtual interface
7680 * @adap: the adapter
7681 * @mbox: mailbox to use for the FW command
7682 * @pf: the PF owning the VI
7683 * @vf: the VF owning the VI
7684 * @viid: virtual interface identifiler
7686 * Free a previously allocated virtual interface.
7688 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7689 unsigned int vf, unsigned int viid)
/* Build a VI command with the FREE flag and the target VI id, then
 * issue it synchronously (elided flag lines presumably include
 * REQUEST/EXEC bits — not visible in this extract).
 */
7693 memset(&c, 0, sizeof(c));
7694 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7697 FW_VI_CMD_PFN_V(pf) |
7698 FW_VI_CMD_VFN_V(vf));
7699 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7700 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7702 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7706 * t4_set_rxmode - set Rx properties of a virtual interface
7707 * @adap: the adapter
7708 * @mbox: mailbox to use for the FW command
7710 * @viid_mirror: the mirror VI id
7711 * @mtu: the new MTU or -1
7712 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7713 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7714 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7715 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7716 * @sleep_ok: if true we may sleep while awaiting command completion
7718 * Sets Rx properties of a virtual interface.
/* NOTE(review): the guard conditions (e.g. "if (mtu < 0)") in front of
 * each sentinel conversion and the final return are elided here.
 */
7720 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7721 unsigned int viid_mirror, int mtu, int promisc, int all_multi,
7722 int bcast, int vlanex, bool sleep_ok)
7724 struct fw_vi_rxmode_cmd c, c_mirror;
7727 /* convert to FW values */
/* -1 ("no change") inputs are mapped to the FW all-ones mask values. */
7729 mtu = FW_RXMODE_MTU_NO_CHG;
7731 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7733 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7735 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7737 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7739 memset(&c, 0, sizeof(c));
7740 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7741 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7742 FW_VI_RXMODE_CMD_VIID_V(viid));
7743 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7745 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7746 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7747 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7748 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7749 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
/* If a mirror VI exists, clone the command and retarget its VI id. */
7752 memcpy(&c_mirror, &c, sizeof(c_mirror));
7753 c_mirror.op_to_viid =
7754 cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7755 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7756 FW_VI_RXMODE_CMD_VIID_V(viid_mirror));
/* Program the main VI first; the mirror follows only on success. */
7759 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7764 ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror),
7771 * t4_free_encap_mac_filt - frees MPS entry at given index
7772 * @adap: the adapter
7774 * @idx: index of MPS entry to be freed
7775 * @sleep_ok: call is allowed to sleep
7777 * Frees the MPS entry at supplied index
7779 * Returns a negative error number or zero on success
7781 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
7782 int idx, bool sleep_ok)
7784 struct fw_vi_mac_exact *p;
/* An all-zero MAC plus VALID + the target index frees that MPS entry. */
7785 u8 addr[] = {0, 0, 0, 0, 0, 0};
7786 struct fw_vi_mac_cmd c;
7790 memset(&c, 0, sizeof(c));
7791 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7792 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7794 FW_VI_MAC_CMD_VIID_V(viid));
7795 exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
7796 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7800 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7801 FW_VI_MAC_CMD_IDX_V(idx));
7802 memcpy(p->macaddr, addr, sizeof(p->macaddr));
/* Uses the adapter's own mailbox rather than a caller-supplied one. */
7803 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7808 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
7809 * @adap: the adapter
7811 * @addr: the MAC address
7813 * @idx: index of the entry in mps tcam
7814 * @lookup_type: MAC address for inner (1) or outer (0) header
7815 * @port_id: the port index
7816 * @sleep_ok: call is allowed to sleep
7818 * Removes the mac entry at the specified index using raw mac interface.
7820 * Returns a negative error number on failure.
7822 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
7823 const u8 *addr, const u8 *mask, unsigned int idx,
7824 u8 lookup_type, u8 port_id, bool sleep_ok)
7826 struct fw_vi_mac_cmd c;
7827 struct fw_vi_mac_raw *p = &c.u.raw;
7830 memset(&c, 0, sizeof(c));
7831 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7832 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7834 FW_VI_MAC_CMD_VIID_V(viid));
/* One 16-byte unit of payload, RAW entry type. */
7835 val = FW_CMD_LEN16_V(1) |
7836 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7837 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7838 FW_CMD_LEN16_V(val));
/* ID_BASED_FREE tells FW to free the TCAM entry at @idx. */
7840 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
7841 FW_VI_MAC_ID_BASED_FREE);
7843 /* Lookup Type. Outer header: 0, Inner header: 1 */
7844 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7845 DATAPORTNUM_V(port_id));
7846 /* Lookup mask and port mask */
7847 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7848 DATAPORTNUM_V(DATAPORTNUM_M));
7850 /* Copy the address and the mask */
/* The 6-byte MAC/mask start 2 bytes into the 8-byte data1/data1m words. */
7851 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7852 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7854 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7858 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
7859 * @adap: the adapter
7861 * @addr: the MAC address
7863 * @vni: the VNI id for the tunnel protocol
7864 * @vni_mask: mask for the VNI id
7865 * @dip_hit: to enable DIP match for the MPS entry
7866 * @lookup_type: MAC address for inner (1) or outer (0) header
7867 * @sleep_ok: call is allowed to sleep
7869 * Allocates an MPS entry with specified MAC address and VNI value.
7871 * Returns a negative error number or the allocated index for this mac.
7873 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
7874 const u8 *addr, const u8 *mask, unsigned int vni,
7875 unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
7878 struct fw_vi_mac_cmd c;
7879 struct fw_vi_mac_vni *p = c.u.exact_vni;
7883 memset(&c, 0, sizeof(c));
7884 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7885 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7886 FW_VI_MAC_CMD_VIID_V(viid));
/* One 16-byte payload unit of the exact-MAC + VNI entry type. */
7887 val = FW_CMD_LEN16_V(1) |
7888 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
7889 c.freemacs_to_len16 = cpu_to_be32(val);
/* ADD_MAC index asks FW to pick a free slot for this entry. */
7890 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7891 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
7892 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7893 memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
7895 p->lookup_type_to_vni =
7896 cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
7897 FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
7898 FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
7899 p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
7900 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
/* On success, extract the index FW assigned to the new entry. */
7902 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7907 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
7908 * @adap: the adapter
7910 * @addr: the MAC address
7912 * @idx: index at which to add this entry
7913 * @lookup_type: MAC address for inner (1) or outer (0) header
7914 * @port_id: the port index
7915 * @sleep_ok: call is allowed to sleep
7917 * Adds the mac entry at the specified index using raw mac interface.
7919 * Returns a negative error number or the allocated index for this mac.
7921 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
7922 const u8 *addr, const u8 *mask, unsigned int idx,
7923 u8 lookup_type, u8 port_id, bool sleep_ok)
7926 struct fw_vi_mac_cmd c;
7927 struct fw_vi_mac_raw *p = &c.u.raw;
7930 memset(&c, 0, sizeof(c));
7931 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7932 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7933 FW_VI_MAC_CMD_VIID_V(viid));
/* One 16-byte unit of payload, RAW entry type. */
7934 val = FW_CMD_LEN16_V(1) |
7935 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7936 c.freemacs_to_len16 = cpu_to_be32(val);
7938 /* Specify that this is an inner mac address */
7939 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
7941 /* Lookup Type. Outer header: 0, Inner header: 1 */
7942 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7943 DATAPORTNUM_V(port_id));
7944 /* Lookup mask and port mask */
7945 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7946 DATAPORTNUM_V(DATAPORTNUM_M));
7948 /* Copy the address and the mask */
/* The 6-byte MAC/mask start 2 bytes into the 8-byte data1/data1m words. */
7949 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7950 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7952 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
/* On success, report the TCAM index FW actually used. */
7954 ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
7963 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7964 * @adap: the adapter
7965 * @mbox: mailbox to use for the FW command
7967 * @free: if true any existing filters for this VI id are first removed
7968 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7969 * @addr: the MAC address(es)
7970 * @idx: where to store the index of each allocated filter
7971 * @hash: pointer to hash address filter bitmap
7972 * @sleep_ok: call is allowed to sleep
7974 * Allocates an exact-match filter for each of the supplied addresses and
7975 * sets it to the corresponding address. If @idx is not %NULL it should
7976 * have at least @naddr entries, each of which will be set to the index of
7977 * the filter allocated for the corresponding MAC address. If a filter
7978 * could not be allocated for an address its index is set to 0xffff.
7979 * If @hash is not %NULL addresses that fail to allocate an exact filter
7980 * are hashed and update the hash filter bitmap pointed at by @hash.
7982 * Returns a negative error number or the number of filters allocated.
/* NOTE(review): the hash-fallback branch, loop-advance statements and
 * final return are partially elided in this extract.
 */
7984 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7985 unsigned int viid, bool free, unsigned int naddr,
7986 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7988 int offset, ret = 0;
7989 struct fw_vi_mac_cmd c;
7990 unsigned int nfilters = 0;
7991 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
7992 unsigned int rem = naddr;
/* Reject requests that exceed the MPS TCAM capacity outright. */
7994 if (naddr > max_naddr)
/* Process addresses in command-sized chunks (at most ARRAY_SIZE(c.u.exact)
 * entries per mailbox command); offset advances inside the loop body.
 */
7997 for (offset = 0; offset < naddr ; /**/) {
7998 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
7999 rem : ARRAY_SIZE(c.u.exact));
8000 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8001 u.exact[fw_naddr]), 16);
8002 struct fw_vi_mac_exact *p;
8005 memset(&c, 0, sizeof(c));
8006 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
/* EXEC/FREEMACS are driven by @free: first chunk may flush existing
 * filters for this VI.
 */
8009 FW_CMD_EXEC_V(free) |
8010 FW_VI_MAC_CMD_VIID_V(viid));
8011 c.freemacs_to_len16 =
8012 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
8013 FW_CMD_LEN16_V(len16));
8015 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8017 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
8018 FW_VI_MAC_CMD_IDX_V(
8019 FW_VI_MAC_ADD_MAC));
8020 memcpy(p->macaddr, addr[offset + i],
8021 sizeof(p->macaddr));
8024 /* It's okay if we run out of space in our MAC address arena.
8025 * Some of the addresses we submit may get stored so we need
8026 * to run through the reply to see what the results were ...
8028 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8029 if (ret && ret != -FW_ENOMEM)
/* Walk the reply: record each assigned index (0xffff when the entry
 * didn't fit), count successes, and hash the overflow addresses.
 */
8032 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8033 u16 index = FW_VI_MAC_CMD_IDX_G(
8034 be16_to_cpu(p->valid_to_idx));
8037 idx[offset + i] = (index >= max_naddr ?
8039 if (index < max_naddr)
8043 hash_mac_addr(addr[offset + i]));
/* FW_ENOMEM is a soft failure: report how many filters were placed. */
8051 if (ret == 0 || ret == -FW_ENOMEM)
8057 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
8058 * @adap: the adapter
8059 * @mbox: mailbox to use for the FW command
8061 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
8062 * @addr: the MAC address(es)
8063 * @sleep_ok: call is allowed to sleep
8065 * Frees the exact-match filter for each of the supplied addresses
8067 * Returns a negative error number or the number of filters freed.
/* NOTE(review): loop-advance statements, error break and final return
 * are elided in this extract.
 */
8069 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8070 unsigned int viid, unsigned int naddr,
8071 const u8 **addr, bool sleep_ok)
8073 int offset, ret = 0;
8074 struct fw_vi_mac_cmd c;
8075 unsigned int nfilters = 0;
/* T4 and T5+ have different MPS exact-match table sizes. */
8076 unsigned int max_naddr = is_t4(adap->params.chip) ?
8077 NUM_MPS_CLS_SRAM_L_INSTANCES :
8078 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8079 unsigned int rem = naddr;
8081 if (naddr > max_naddr)
/* Free addresses in command-sized chunks, like t4_alloc_mac_filt(). */
8084 for (offset = 0; offset < (int)naddr ; /**/) {
8085 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8087 : ARRAY_SIZE(c.u.exact));
8088 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8089 u.exact[fw_naddr]), 16);
8090 struct fw_vi_mac_exact *p;
8093 memset(&c, 0, sizeof(c));
8094 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8098 FW_VI_MAC_CMD_VIID_V(viid));
8099 c.freemacs_to_len16 =
8100 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
8101 FW_CMD_LEN16_V(len16));
/* MAC_BASED_FREE: each entry is freed by matching its MAC address. */
8103 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8104 p->valid_to_idx = cpu_to_be16(
8105 FW_VI_MAC_CMD_VALID_F |
8106 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
8107 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8110 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
/* A reply index below max_naddr confirms that entry was freed. */
8114 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8115 u16 index = FW_VI_MAC_CMD_IDX_G(
8116 be16_to_cpu(p->valid_to_idx));
8118 if (index < max_naddr)
8132 * t4_change_mac - modifies the exact-match filter for a MAC address
8133 * @adap: the adapter
8134 * @mbox: mailbox to use for the FW command
8136 * @idx: index of existing filter for old value of MAC address, or -1
8137 * @addr: the new MAC address value
8138 * @persist: whether a new MAC allocation should be persistent
8139 * @smt_idx: the destination to store the new SMT index.
8141 * Modifies an exact-match filter and sets it to the new MAC address.
8142 * Note that in general it is not possible to modify the value of a given
8143 * filter so the generic way to modify an address filter is to free the one
8144 * being used by the old address value and allocate a new filter for the
8145 * new address value. @idx can be -1 if the address is a new addition.
8147 * Returns a negative error number or the index of the filter with the new
8150 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8151 int idx, const u8 *addr, bool persist, u8 *smt_idx)
8154 struct fw_vi_mac_cmd c;
8155 struct fw_vi_mac_exact *p = c.u.exact;
8156 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
/* idx < 0 requests a brand-new filter; @persist selects whether the
 * allocation survives a VI reset.
 */
8158 if (idx < 0) /* new allocation */
8159 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
/* Ask for an SMT entry too only when the caller wants the index back. */
8160 mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
8162 memset(&c, 0, sizeof(c));
8163 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8164 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
8165 FW_VI_MAC_CMD_VIID_V(viid));
8166 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
8167 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
8168 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
8169 FW_VI_MAC_CMD_IDX_V(idx));
8170 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8172 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Reply index >= TCAM size signals an out-of-range/failed allocation
 * (the elided branch presumably maps it to an error code).
 */
8174 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
8175 if (ret >= max_mac_addr)
/* SMT index: read from FW when the VIID/SMT extension is supported,
 * otherwise derive it from the VIN bits (T4/T5 SMT rows hold 2 entries,
 * hence the extra << 1; T6 rows hold a single entry).
 */
8178 if (adap->params.viid_smt_extn_support) {
8179 *smt_idx = FW_VI_MAC_CMD_SMTID_G
8180 (be32_to_cpu(c.op_to_viid));
8182 /* In T4/T5, SMT contains 256 SMAC entries
8183 * organized in 128 rows of 2 entries each.
8184 * In T6, SMT contains 256 SMAC entries in
8187 if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
8189 *smt_idx = (viid & FW_VIID_VIN_M) << 1;
8191 *smt_idx = (viid & FW_VIID_VIN_M);
8199 * t4_set_addr_hash - program the MAC inexact-match hash filter
8200 * @adap: the adapter
8201 * @mbox: mailbox to use for the FW command
8203 * @ucast: whether the hash filter should also match unicast addresses
8204 * @vec: the value to be written to the hash filter
8205 * @sleep_ok: call is allowed to sleep
8207 * Sets the 64-bit inexact-match hash filter for a virtual interface.
8209 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8210 bool ucast, u64 vec, bool sleep_ok)
8212 struct fw_vi_mac_cmd c;
8214 memset(&c, 0, sizeof(c));
8215 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8216 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
8217 FW_VI_ENABLE_CMD_VIID_V(viid));
/* HASHVECEN enables the hash filter; HASHUNIEN extends it to unicast. */
8218 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
8219 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
8221 c.u.hash.hashvec = cpu_to_be64(vec);
8222 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8226 * t4_enable_vi_params - enable/disable a virtual interface
8227 * @adap: the adapter
8228 * @mbox: mailbox to use for the FW command
8230 * @rx_en: 1=enable Rx, 0=disable Rx
8231 * @tx_en: 1=enable Tx, 0=disable Tx
8232 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8234 * Enables/disables a virtual interface. Note that setting DCB Enable
8235 * only makes sense when enabling a Virtual Interface ...
8237 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8238 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8240 struct fw_vi_enable_cmd c;
8242 memset(&c, 0, sizeof(c));
8243 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8244 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8245 FW_VI_ENABLE_CMD_VIID_V(viid));
8246 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
8247 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
8248 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
8250 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8254 * t4_enable_vi - enable/disable a virtual interface
8255 * @adap: the adapter
8256 * @mbox: mailbox to use for the FW command
8258 * @rx_en: 1=enable Rx, 0=disable Rx
8259 * @tx_en: 1=enable Tx, 0=disable Tx
8261 * Enables/disables a virtual interface.
8263 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8264 bool rx_en, bool tx_en)
8266 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8270 * t4_enable_pi_params - enable/disable a Port's Virtual Interface
8271 * @adap: the adapter
8272 * @mbox: mailbox to use for the FW command
8273 * @pi: the Port Information structure
8274 * @rx_en: 1=enable Rx, 0=disable Rx
8275 * @tx_en: 1=enable Tx, 0=disable Tx
8276 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8278 * Enables/disables a Port's Virtual Interface. Note that setting DCB
8279 * Enable only makes sense when enabling a Virtual Interface ...
8280 * If the Virtual Interface enable/disable operation is successful,
8281 * we notify the OS-specific code of a potential Link Status change
8282 * via the OS Contract API t4_os_link_changed().
8284 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
8285 struct port_info *pi,
8286 bool rx_en, bool tx_en, bool dcb_en)
8288 int ret = t4_enable_vi_params(adap, mbox, pi->viid,
8289 rx_en, tx_en, dcb_en);
8292 t4_os_link_changed(adap, pi->port_id,
8293 rx_en && tx_en && pi->link_cfg.link_ok);
8298 * t4_identify_port - identify a VI's port by blinking its LED
8299 * @adap: the adapter
8300 * @mbox: mailbox to use for the FW command
8302 * @nblinks: how many times to blink LED at 2.5 Hz
8304 * Identifies a VI's port by blinking its LED.
8306 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8307 unsigned int nblinks)
8309 struct fw_vi_enable_cmd c;
8311 memset(&c, 0, sizeof(c));
8312 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8313 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8314 FW_VI_ENABLE_CMD_VIID_V(viid));
8315 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
8316 c.blinkdur = cpu_to_be16(nblinks);
8317 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8321 * t4_iq_stop - stop an ingress queue and its FLs
8322 * @adap: the adapter
8323 * @mbox: mailbox to use for the FW command
8324 * @pf: the PF owning the queues
8325 * @vf: the VF owning the queues
8326 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8327 * @iqid: ingress queue id
8328 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8329 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8331 * Stops an ingress queue and its associated FLs, if any. This causes
8332 * any current or future data/messages destined for these queues to be
8335 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8336 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8337 unsigned int fl0id, unsigned int fl1id)
8341 memset(&c, 0, sizeof(c));
8342 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8343 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8344 FW_IQ_CMD_VFN_V(vf));
8345 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
8346 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8347 c.iqid = cpu_to_be16(iqid);
8348 c.fl0id = cpu_to_be16(fl0id);
8349 c.fl1id = cpu_to_be16(fl1id);
8350 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8354 * t4_iq_free - free an ingress queue and its FLs
8355 * @adap: the adapter
8356 * @mbox: mailbox to use for the FW command
8357 * @pf: the PF owning the queues
8358 * @vf: the VF owning the queues
8359 * @iqtype: the ingress queue type
8360 * @iqid: ingress queue id
8361 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8362 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8364 * Frees an ingress queue and its associated FLs, if any.
8366 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8367 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8368 unsigned int fl0id, unsigned int fl1id)
8372 memset(&c, 0, sizeof(c));
8373 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8374 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8375 FW_IQ_CMD_VFN_V(vf));
8376 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
8377 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8378 c.iqid = cpu_to_be16(iqid);
8379 c.fl0id = cpu_to_be16(fl0id);
8380 c.fl1id = cpu_to_be16(fl1id);
8381 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8385 * t4_eth_eq_free - free an Ethernet egress queue
8386 * @adap: the adapter
8387 * @mbox: mailbox to use for the FW command
8388 * @pf: the PF owning the queue
8389 * @vf: the VF owning the queue
8390 * @eqid: egress queue id
8392 * Frees an Ethernet egress queue.
8394 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8395 unsigned int vf, unsigned int eqid)
8397 struct fw_eq_eth_cmd c;
8399 memset(&c, 0, sizeof(c));
8400 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
8401 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8402 FW_EQ_ETH_CMD_PFN_V(pf) |
8403 FW_EQ_ETH_CMD_VFN_V(vf));
8404 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
8405 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
8406 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8410 * t4_ctrl_eq_free - free a control egress queue
8411 * @adap: the adapter
8412 * @mbox: mailbox to use for the FW command
8413 * @pf: the PF owning the queue
8414 * @vf: the VF owning the queue
8415 * @eqid: egress queue id
8417 * Frees a control egress queue.
8419 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8420 unsigned int vf, unsigned int eqid)
8422 struct fw_eq_ctrl_cmd c;
8424 memset(&c, 0, sizeof(c));
8425 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
8426 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8427 FW_EQ_CTRL_CMD_PFN_V(pf) |
8428 FW_EQ_CTRL_CMD_VFN_V(vf));
8429 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
8430 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
8431 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8435 * t4_ofld_eq_free - free an offload egress queue
8436 * @adap: the adapter
8437 * @mbox: mailbox to use for the FW command
8438 * @pf: the PF owning the queue
8439 * @vf: the VF owning the queue
8440 * @eqid: egress queue id
8442 * Frees a control egress queue.
8444 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8445 unsigned int vf, unsigned int eqid)
8447 struct fw_eq_ofld_cmd c;
8449 memset(&c, 0, sizeof(c));
8450 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
8451 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8452 FW_EQ_OFLD_CMD_PFN_V(pf) |
8453 FW_EQ_OFLD_CMD_VFN_V(vf));
8454 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
8455 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
8456 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the Firmware's Link Down Reason Code. */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}
8484 /* Return the highest speed set in the port capabilities, in Mb/s. */
8485 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
8487 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8489 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8493 TEST_SPEED_RETURN(400G, 400000);
8494 TEST_SPEED_RETURN(200G, 200000);
8495 TEST_SPEED_RETURN(100G, 100000);
8496 TEST_SPEED_RETURN(50G, 50000);
8497 TEST_SPEED_RETURN(40G, 40000);
8498 TEST_SPEED_RETURN(25G, 25000);
8499 TEST_SPEED_RETURN(10G, 10000);
8500 TEST_SPEED_RETURN(1G, 1000);
8501 TEST_SPEED_RETURN(100M, 100);
8503 #undef TEST_SPEED_RETURN
8509 * fwcap_to_fwspeed - return highest speed in Port Capabilities
8510 * @acaps: advertised Port Capabilities
8512 * Get the highest speed for the port from the advertised Port
8513 * Capabilities. It will be either the highest speed from the list of
8514 * speeds or whatever user has set using ethtool.
8516 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
8518 #define TEST_SPEED_RETURN(__caps_speed) \
8520 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8521 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8524 TEST_SPEED_RETURN(400G);
8525 TEST_SPEED_RETURN(200G);
8526 TEST_SPEED_RETURN(100G);
8527 TEST_SPEED_RETURN(50G);
8528 TEST_SPEED_RETURN(40G);
8529 TEST_SPEED_RETURN(25G);
8530 TEST_SPEED_RETURN(10G);
8531 TEST_SPEED_RETURN(1G);
8532 TEST_SPEED_RETURN(100M);
8534 #undef TEST_SPEED_RETURN
8540 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8541 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8543 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8544 * 32-bit Port Capabilities value.
8546 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
8548 fw_port_cap32_t linkattr = 0;
8550 /* Unfortunately the format of the Link Status in the old
8551 * 16-bit Port Information message isn't the same as the
8552 * 16-bit Port Capabilities bitfield used everywhere else ...
8554 if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8555 linkattr |= FW_PORT_CAP32_FC_RX;
8556 if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8557 linkattr |= FW_PORT_CAP32_FC_TX;
8558 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8559 linkattr |= FW_PORT_CAP32_SPEED_100M;
8560 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8561 linkattr |= FW_PORT_CAP32_SPEED_1G;
8562 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8563 linkattr |= FW_PORT_CAP32_SPEED_10G;
8564 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8565 linkattr |= FW_PORT_CAP32_SPEED_25G;
8566 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8567 linkattr |= FW_PORT_CAP32_SPEED_40G;
8568 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8569 linkattr |= FW_PORT_CAP32_SPEED_100G;
8575 * t4_handle_get_port_info - process a FW reply message
8576 * @pi: the port info
8577 * @rpl: start of the FW message
8579 * Processes a GET_PORT_INFO FW reply message.
/* Decodes both the old 16-bit (GET_PORT_INFO) and new 32-bit
 * (GET_PORT_INFO32) reply formats into common 32-bit capability values,
 * records Transceiver Module and link-state changes, and notifies the
 * OS-dependent layer via t4_os_portmod_changed()/t4_os_link_changed().
 */
8581 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8583 const struct fw_port_cmd *cmd = (const void *)rpl;
8584 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8585 struct link_config *lc = &pi->link_cfg;
8586 struct adapter *adapter = pi->adapter;
8587 unsigned int speed, fc, fec, adv_fc;
8588 enum fw_port_module_type mod_type;
8589 int action, link_ok, linkdnrc;
8590 enum fw_port_type port_type;
8592 /* Extract the various fields from the Port Information message.
8594 action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
/* Old 16-bit reply: capabilities arrive as 16-bit fields and are widened
 * via fwcaps16_to_caps32(); link attributes are synthesized from lstatus.
 */
8596 case FW_PORT_ACTION_GET_PORT_INFO: {
8597 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8599 link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8600 linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8601 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8602 mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8603 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8604 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8605 lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8606 linkattr = lstatus_to_fwcap(lstatus);
/* New 32-bit reply: capabilities and link attributes are already in the
 * 32-bit format and need only byte-swapping.
 */
8610 case FW_PORT_ACTION_GET_PORT_INFO32: {
8613 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8614 link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8615 linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8616 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8617 mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8618 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8619 acaps = be32_to_cpu(cmd->u.info32.acaps32);
8620 lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8621 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8626 dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8627 be32_to_cpu(cmd->action_to_len16));
/* Derive scalar link parameters from the decoded capability words. */
8631 fec = fwcap_to_cc_fec(acaps);
8632 adv_fc = fwcap_to_cc_pause(acaps);
8633 fc = fwcap_to_cc_pause(linkattr);
8634 speed = fwcap_to_speed(linkattr);
8636 /* Reset state for communicating new Transceiver Module status and
8637 * whether the OS-dependent layer wants us to redo the current
8638 * "sticky" L1 Configure Link Parameters.
8640 lc->new_module = false;
8641 lc->redo_l1cfg = false;
8643 if (mod_type != pi->mod_type) {
8644 /* With the newer SFP28 and QSFP28 Transceiver Module Types,
8645 * various fundamental Port Capabilities which used to be
8646 * immutable can now change radically. We can now have
8647 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8648 * all change based on what Transceiver Module is inserted.
8649 * So we need to record the Physical "Port" Capabilities on
8650 * every Transceiver Module change.
8654 /* When a new Transceiver Module is inserted, the Firmware
8655 * will examine its i2c EPROM to determine its type and
8656 * general operating parameters including things like Forward
8657 * Error Control, etc. Various IEEE 802.3 standards dictate
8658 * how to interpret these i2c values to determine default
8659 * "sutomatic" settings. We record these for future use when
8660 * the user explicitly requests these standards-based values.
8662 lc->def_acaps = acaps;
8664 /* Some versions of the early T6 Firmware "cheated" when
8665 * handling different Transceiver Modules by changing the
8666 * underlaying Port Type reported to the Host Drivers. As
8667 * such we need to capture whatever Port Type the Firmware
8668 * sends us and record it in case it's different from what we
8669 * were told earlier. Unfortunately, since Firmware is
8670 * forever, we'll need to keep this code here forever, but in
8671 * later T6 Firmware it should just be an assignment of the
8672 * same value already recorded.
8674 pi->port_type = port_type;
8676 /* Record new Module Type information.
8678 pi->mod_type = mod_type;
8680 /* Let the OS-dependent layer know if we have a new
8681 * Transceiver Module inserted.
8683 lc->new_module = t4_is_inserted_mod_type(mod_type);
8685 t4_os_portmod_changed(adapter, pi->port_id);
/* Only update cached link state (and notify the OS) when something
 * actually differs from what we last recorded.
 */
8688 if (link_ok != lc->link_ok || speed != lc->speed ||
8689 fc != lc->fc || adv_fc != lc->advertised_fc ||
8691 /* something changed */
8692 if (!link_ok && lc->link_ok) {
8693 lc->link_down_rc = linkdnrc;
8694 dev_warn_ratelimited(adapter->pdev_dev,
8695 "Port %d link down, reason: %s\n",
8697 t4_link_down_rc_str(linkdnrc));
8699 lc->link_ok = link_ok;
8701 lc->advertised_fc = adv_fc;
8705 lc->lpacaps = lpacaps;
8706 lc->acaps = acaps & ADVERT_MASK;
8708 /* If we're not physically capable of Auto-Negotiation, note
8709 * this as Auto-Negotiation disabled. Otherwise, we track
8710 * what Auto-Negotiation settings we have. Note parallel
8711 * structure in t4_link_l1cfg_core() and init_link_config().
/* NOTE(review): both this test and the "else if" below check lc->acaps,
 * which makes the final "else" branch unreachable; mainline t4_hw.c
 * tests lc->pcaps in the first condition -- confirm against upstream.
 */
8713 if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
8714 lc->autoneg = AUTONEG_DISABLE;
8715 } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
8716 lc->autoneg = AUTONEG_ENABLE;
8718 /* When Autoneg is disabled, user needs to set
8720 * Similar to cxgb4_ethtool.c: set_link_ksettings
8723 lc->speed_caps = fwcap_to_fwspeed(acaps);
8724 lc->autoneg = AUTONEG_DISABLE;
8727 t4_os_link_changed(adapter, pi->port_id, link_ok);
8730 /* If we have a new Transceiver Module and the OS-dependent code has
8731 * told us that it wants us to redo whatever "sticky" L1 Configuration
8732 * Link Parameters are set, do that now.
8734 if (lc->new_module && lc->redo_l1cfg) {
8735 struct link_config old_lc;
8738 /* Save the current L1 Configuration and restore it if an
8739 * error occurs. We probably should fix the l1_cfg*()
8740 * routines not to change the link_config when an error
8744 ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
8747 dev_warn(adapter->pdev_dev,
8748 "Attempt to update new Transceiver Module settings failed\n");
8751 lc->new_module = false;
8752 lc->redo_l1cfg = false;
8756 * t4_update_port_info - retrieve and update port information if changed
8757 * @pi: the port_info
8759 * We issue a Get Port Information Command to the Firmware and, if
8760 * successful, we check to see if anything is different from what we
8761 * last recorded and update things accordingly.
8763 int t4_update_port_info(struct port_info *pi)
8765 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8766 struct fw_port_cmd port_cmd;
8769 memset(&port_cmd, 0, sizeof(port_cmd));
8770 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8771 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8772 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8773 port_cmd.action_to_len16 = cpu_to_be32(
8774 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8775 ? FW_PORT_ACTION_GET_PORT_INFO
8776 : FW_PORT_ACTION_GET_PORT_INFO32) |
8777 FW_LEN16(port_cmd));
8778 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8779 &port_cmd, sizeof(port_cmd), &port_cmd);
8783 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8788 * t4_get_link_params - retrieve basic link parameters for given port
8790 * @link_okp: value return pointer for link up/down
8791 * @speedp: value return pointer for speed (Mb/s)
8792 * @mtup: value return pointer for mtu
8794 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8795 * and MTU for a specified port. A negative error is returned on
8796 * failure; 0 on success.
8798 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8799 unsigned int *speedp, unsigned int *mtup)
8801 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8802 unsigned int action, link_ok, mtu;
8803 struct fw_port_cmd port_cmd;
8804 fw_port_cap32_t linkattr;
8807 memset(&port_cmd, 0, sizeof(port_cmd));
8808 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8809 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8810 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8811 action = (fw_caps == FW_CAPS16
8812 ? FW_PORT_ACTION_GET_PORT_INFO
8813 : FW_PORT_ACTION_GET_PORT_INFO32);
8814 port_cmd.action_to_len16 = cpu_to_be32(
8815 FW_PORT_CMD_ACTION_V(action) |
8816 FW_LEN16(port_cmd));
8817 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8818 &port_cmd, sizeof(port_cmd), &port_cmd);
8822 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8823 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8825 link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8826 linkattr = lstatus_to_fwcap(lstatus);
8827 mtu = be16_to_cpu(port_cmd.u.info.mtu);
8830 be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8832 link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8833 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8834 mtu = FW_PORT_CMD_MTU32_G(
8835 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8839 *link_okp = link_ok;
8841 *speedp = fwcap_to_speed(linkattr);
8849 * t4_handle_fw_rpl - process a FW reply message
8850 * @adap: the adapter
8851 * @rpl: start of the FW message
8853 * Processes a FW message, such as link state change messages.
8855 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8857 u8 opcode = *(const u8 *)rpl;
8859 /* This might be a port command ... this simplifies the following
8860 * conditionals ... We can get away with pre-dereferencing
8861 * action_to_len16 because it's in the first 16 bytes and all messages
8862 * will be at least that long.
8864 const struct fw_port_cmd *p = (const void *)rpl;
8865 unsigned int action =
8866 FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
8868 if (opcode == FW_PORT_CMD &&
8869 (action == FW_PORT_ACTION_GET_PORT_INFO ||
8870 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8872 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
8873 struct port_info *pi = NULL;
8875 for_each_port(adap, i) {
8876 pi = adap2pinfo(adap, i);
8877 if (pi->tx_chan == chan)
8881 t4_handle_get_port_info(pi, rpl);
8883 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8890 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8894 if (pci_is_pcie(adapter->pdev)) {
8895 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8896 p->speed = val & PCI_EXP_LNKSTA_CLS;
8897 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8902 * init_link_config - initialize a link's SW state
8903 * @lc: pointer to structure holding the link state
8904 * @pcaps: link Port Capabilities
8905 * @acaps: link current Advertised Port Capabilities
8907 * Initializes the SW state maintained for each link, including the link's
8908 * capabilities and default speed/flow-control/autonegotiation settings.
8910 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8911 fw_port_cap32_t acaps)
8914 lc->def_acaps = acaps;
8918 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8920 /* For Forward Error Control, we default to whatever the Firmware
8921 * tells us the Link is currently advertising.
8923 lc->requested_fec = FEC_AUTO;
8924 lc->fec = fwcap_to_cc_fec(lc->def_acaps);
8926 /* If the Port is capable of Auto-Negtotiation, initialize it as
8927 * "enabled" and copy over all of the Physical Port Capabilities
8928 * to the Advertised Port Capabilities. Otherwise mark it as
8929 * Auto-Negotiate disabled and select the highest supported speed
8930 * for the link. Note parallel structure in t4_link_l1cfg_core()
8931 * and t4_handle_get_port_info().
8933 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8934 lc->acaps = lc->pcaps & ADVERT_MASK;
8935 lc->autoneg = AUTONEG_ENABLE;
8936 lc->requested_fc |= PAUSE_AUTONEG;
8939 lc->autoneg = AUTONEG_DISABLE;
8940 lc->speed_caps = fwcap_to_fwspeed(acaps);
8944 #define CIM_PF_NOACCESS 0xeeeeeeee
8946 int t4_wait_dev_ready(void __iomem *regs)
8950 whoami = readl(regs + PL_WHOAMI_A);
8951 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8955 whoami = readl(regs + PL_WHOAMI_A);
8956 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
8960 u32 vendor_and_model_id;
/* Probe the serial flash: issue a Read ID command, decode the part's
 * manufacturer/density, and record the flash size and sector count in
 * adap->params.sf_size / adap->params.sf_nsec.
 */
8964 static int t4_get_flash_params(struct adapter *adap)
8966 /* Table for non-Numonix supported flash parts. Numonix parts are left
8967 * to the preexisting code. All flash parts have 64KB sectors.
8969 static struct flash_desc supported_flash[] = {
8970 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8973 unsigned int part, manufacturer;
8974 unsigned int density, size = 0;
8978 /* Issue a Read ID Command to the Flash part. We decode supported
8979 * Flash parts and their sizes from this. There's a newer Query
8980 * Command which can retrieve detailed geometry information but many
8981 * Flash parts don't support it.
8984 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
8986 ret = sf1_read(adap, 3, 0, 1, &flashid);
8987 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
8991 /* Check to see if it's one of our non-standard supported Flash parts.
8993 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8994 if (supported_flash[part].vendor_and_model_id == flashid) {
8995 adap->params.sf_size = supported_flash[part].size_mb;
8996 adap->params.sf_nsec =
8997 adap->params.sf_size / SF_SEC_SIZE;
9001 /* Decode Flash part size. The code below looks repetitive with
9002 * common encodings, but that's not guaranteed in the JEDEC
9003 * specification for the Read JEDEC ID command. The only thing that
9004 * we're guaranteed by the JEDEC specification is where the
9005 * Manufacturer ID is in the returned result. After that each
9006 * Manufacturer ~could~ encode things completely differently.
9007 * Note, all Flash parts must have 64KB sectors.
/* Low byte of the Read ID result is the JEDEC Manufacturer ID;
 * the density byte is extracted per-manufacturer below.
 */
9009 manufacturer = flashid & 0xff;
9010 switch (manufacturer) {
9011 case 0x20: { /* Micron/Numonix */
9012 /* This Density -> Size decoding table is taken from Micron
9015 density = (flashid >> 16) & 0xff;
9017 case 0x14: /* 1MB */
9020 case 0x15: /* 2MB */
9023 case 0x16: /* 4MB */
9026 case 0x17: /* 8MB */
9029 case 0x18: /* 16MB */
9032 case 0x19: /* 32MB */
9035 case 0x20: /* 64MB */
9038 case 0x21: /* 128MB */
9041 case 0x22: /* 256MB */
9047 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
9048 /* This Density -> Size decoding table is taken from ISSI
9051 density = (flashid >> 16) & 0xff;
9053 case 0x16: /* 32 MB */
9056 case 0x17: /* 64MB */
9062 case 0xc2: { /* Macronix */
9063 /* This Density -> Size decoding table is taken from Macronix
9066 density = (flashid >> 16) & 0xff;
9068 case 0x17: /* 8MB */
9071 case 0x18: /* 16MB */
9077 case 0xef: { /* Winbond */
9078 /* This Density -> Size decoding table is taken from Winbond
9081 density = (flashid >> 16) & 0xff;
9083 case 0x17: /* 8MB */
9086 case 0x18: /* 16MB */
9094 /* If we didn't recognize the FLASH part, that's no real issue: the
9095 * Hardware/Software contract says that Hardware will _*ALWAYS*_
9096 * use a FLASH part which is at least 4MB in size and has 64KB
9097 * sectors. The unrecognized FLASH part is likely to be much larger
9098 * than 4MB, but that's all we really need.
/* NOTE(review): per the warning string below, unrecognized parts fall
 * back to an assumed 4MB size rather than failing the probe.
 */
9101 dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
9106 /* Store decoded Flash size and fall through into vetting code. */
9107 adap->params.sf_size = size;
9108 adap->params.sf_nsec = size / SF_SEC_SIZE;
9111 if (adap->params.sf_size < FLASH_MIN_SIZE)
9112 dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
9113 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
9118 * t4_prep_adapter - prepare SW and HW for operation
9119 * @adapter: the adapter
9121 * Initialize adapter SW state for the various HW modules, set initial
9122 * values for some adapter tunables, take PHYs out of reset, and
9123 * initialize the MDIO interface.
9125 int t4_prep_adapter(struct adapter *adapter)
9131 get_pci_mode(adapter, &adapter->params.pci);
9132 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
9134 ret = t4_get_flash_params(adapter);
9136 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
9140 /* Retrieve adapter's device ID
9142 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
9143 ver = device_id >> 12;
9144 adapter->params.chip = 0;
9147 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
9148 adapter->params.arch.sge_fl_db = DBPRIO_F;
9149 adapter->params.arch.mps_tcam_size =
9150 NUM_MPS_CLS_SRAM_L_INSTANCES;
9151 adapter->params.arch.mps_rplc_size = 128;
9152 adapter->params.arch.nchan = NCHAN;
9153 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9154 adapter->params.arch.vfcount = 128;
9155 /* Congestion map is for 4 channels so that
9156 * MPS can have 4 priority per port.
9158 adapter->params.arch.cng_ch_bits_log = 2;
9161 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
9162 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
9163 adapter->params.arch.mps_tcam_size =
9164 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9165 adapter->params.arch.mps_rplc_size = 128;
9166 adapter->params.arch.nchan = NCHAN;
9167 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9168 adapter->params.arch.vfcount = 128;
9169 adapter->params.arch.cng_ch_bits_log = 2;
9172 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
9173 adapter->params.arch.sge_fl_db = 0;
9174 adapter->params.arch.mps_tcam_size =
9175 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9176 adapter->params.arch.mps_rplc_size = 256;
9177 adapter->params.arch.nchan = 2;
9178 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
9179 adapter->params.arch.vfcount = 256;
9180 /* Congestion map will be for 2 channels so that
9181 * MPS can have 8 priority per port.
9183 adapter->params.arch.cng_ch_bits_log = 3;
9186 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
9191 adapter->params.cim_la_size = CIMLA_SIZE;
9192 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
9195 * Default port for debugging in case we can't reach FW.
9197 adapter->params.nports = 1;
9198 adapter->params.portvec = 1;
9199 adapter->params.vpd.cclk = 50000;
9201 /* Set PCIe completion timeout to 4 seconds. */
9202 pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
9203 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
9208 * t4_shutdown_adapter - shut down adapter, host & wire
9209 * @adapter: the adapter
9211 * Perform an emergency shutdown of the adapter and stop it from
9212 * continuing any further communication on the ports or DMA to the
9213 * host. This is typically used when the adapter and/or firmware
9214 * have crashed and we want to prevent any further accidental
9215 * communication with the rest of the world. This will also force
9216 * the port Link Status to go down -- if register writes work --
9217 * which should help our peers figure out that we're down.
9219 int t4_shutdown_adapter(struct adapter *adapter)
9223 t4_intr_disable(adapter);
9224 t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
9225 for_each_port(adapter, port) {
9226 u32 a_port_cfg = is_t4(adapter->params.chip) ?
9227 PORT_REG(port, XGMAC_PORT_CFG_A) :
9228 T5_PORT_REG(port, MAC_PORT_CFG_A);
9230 t4_write_reg(adapter, a_port_cfg,
9231 t4_read_reg(adapter, a_port_cfg)
9232 & ~SIGNAL_DET_V(1));
9234 t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
9240 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
9241 * @adapter: the adapter
9242 * @qid: the Queue ID
9243 * @qtype: the Ingress or Egress type for @qid
9244 * @user: true if this request is for a user mode queue
9245 * @pbar2_qoffset: BAR2 Queue Offset
9246 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
9248 * Returns the BAR2 SGE Queue Registers information associated with the
9249 * indicated Absolute Queue ID. These are passed back in return value
9250 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
9251 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
9253 * This may return an error which indicates that BAR2 SGE Queue
9254 * registers aren't available. If an error is not returned, then the
9255 * following values are returned:
9257 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
9258 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
9260 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
9261 * require the "Inferred Queue ID" ability may be used. E.g. the
9262 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
9263 * then these "Inferred Queue ID" register may not be used.
9265 int t4_bar2_sge_qregs(struct adapter *adapter,
9267 enum t4_bar2_qtype qtype,
9270 unsigned int *pbar2_qid)
9272 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
9273 u64 bar2_page_offset, bar2_qoffset;
9274 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
9276 /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
9277 if (!user && is_t4(adapter->params.chip))
9280 /* Get our SGE Page Size parameters.
9282 page_shift = adapter->params.sge.hps + 10;
9283 page_size = 1 << page_shift;
9285 /* Get the right Queues per Page parameters for our Queue.
9287 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
9288 ? adapter->params.sge.eq_qpp
9289 : adapter->params.sge.iq_qpp);
9290 qpp_mask = (1 << qpp_shift) - 1;
9292 /* Calculate the basics of the BAR2 SGE Queue register area:
9293 * o The BAR2 page the Queue registers will be in.
9294 * o The BAR2 Queue ID.
9295 * o The BAR2 Queue ID Offset into the BAR2 page.
9297 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
9298 bar2_qid = qid & qpp_mask;
9299 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
9301 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
9302 * hardware will infer the Absolute Queue ID simply from the writes to
9303 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
9304 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
9305 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
9306 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
9307 * from the BAR2 Page and BAR2 Queue ID.
9309 * One important censequence of this is that some BAR2 SGE registers
9310 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
9311 * there. But other registers synthesize the SGE Queue ID purely
9312 * from the writes to the registers -- the Write Combined Doorbell
9313 * Buffer is a good example. These BAR2 SGE Registers are only
9314 * available for those BAR2 SGE Register areas where the SGE Absolute
9315 * Queue ID can be inferred from simple writes.
9317 bar2_qoffset = bar2_page_offset;
9318 bar2_qinferred = (bar2_qid_offset < page_size);
9319 if (bar2_qinferred) {
9320 bar2_qoffset += bar2_qid_offset;
9324 *pbar2_qoffset = bar2_qoffset;
9325 *pbar2_qid = bar2_qid;
9330 * t4_init_devlog_params - initialize adapter->params.devlog
9331 * @adap: the adapter
9333 * Initialize various fields of the adapter's Firmware Device Log
9334 * Parameters structure.
9336 int t4_init_devlog_params(struct adapter *adap)
9338 struct devlog_params *dparams = &adap->params.devlog;
9340 unsigned int devlog_meminfo;
9341 struct fw_devlog_cmd devlog_cmd;
9344 /* If we're dealing with newer firmware, the Device Log Parameters
9345 * are stored in a designated register which allows us to access the
9346 * Device Log even if we can't talk to the firmware.
9349 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
9351 unsigned int nentries, nentries128;
9353 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
9354 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
9356 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
9357 nentries = (nentries128 + 1) * 128;
9358 dparams->size = nentries * sizeof(struct fw_devlog_e);
9363 /* Otherwise, ask the firmware for it's Device Log Parameters.
9365 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9366 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
9367 FW_CMD_REQUEST_F | FW_CMD_READ_F);
9368 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9369 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
9375 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
9376 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
9377 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
9378 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
9384 * t4_init_sge_params - initialize adap->params.sge
9385 * @adapter: the adapter
9387 * Initialize various fields of the adapter's SGE Parameters structure.
9389 int t4_init_sge_params(struct adapter *adapter)
9391 struct sge_params *sge_params = &adapter->params.sge;
9393 unsigned int s_hps, s_qpp;
9395 /* Extract the SGE Page Size for our PF.
9397 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
9398 s_hps = (HOSTPAGESIZEPF0_S +
9399 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
9400 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
9402 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
9404 s_qpp = (QUEUESPERPAGEPF0_S +
9405 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
9406 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
9407 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9408 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
9409 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9415 * t4_init_tp_params - initialize adap->params.tp
9416 * @adap: the adapter
9417 * @sleep_ok: if true we may sleep while awaiting command completion
9419 * Initialize various fields of the adapter's TP Parameters structure.
9421 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9427 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
9428 adap->params.tp.tre = TIMERRESOLUTION_G(v);
9429 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
9431 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
9432 for (chan = 0; chan < NCHAN; chan++)
9433 adap->params.tp.tx_modq[chan] = chan;
9435 /* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
9438 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
9439 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FILTER) |
9440 FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_FILTER_MODE_MASK));
9442 /* Read current value */
9443 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
9446 dev_info(adap->pdev_dev,
9447 "Current filter mode/mask 0x%x:0x%x\n",
9448 FW_PARAMS_PARAM_FILTER_MODE_G(val),
9449 FW_PARAMS_PARAM_FILTER_MASK_G(val));
9450 adap->params.tp.vlan_pri_map =
9451 FW_PARAMS_PARAM_FILTER_MODE_G(val);
9452 adap->params.tp.filter_mask =
9453 FW_PARAMS_PARAM_FILTER_MASK_G(val);
9455 dev_info(adap->pdev_dev,
9456 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
9458 /* Incase of older-fw (which doesn't expose the api
9459 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
9460 * the fw api) combination, fall-back to older method of reading
9461 * the filter mode from indirect-register
9463 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
9464 TP_VLAN_PRI_MAP_A, sleep_ok);
9466 /* With the older-fw and newer-driver combination we might run
9467 * into an issue when user wants to use hash filter region but
9468 * the filter_mask is zero, in this case filter_mask validation
9469 * is tough. To avoid that we set the filter_mask same as filter
9470 * mode, which will behave exactly as the older way of ignoring
9471 * the filter mask validation.
9473 adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
9476 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
9477 TP_INGRESS_CONFIG_A, sleep_ok);
9479 /* For T6, cache the adapter's compressed error vector
9480 * and passing outer header info for encapsulated packets.
9482 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
9483 v = t4_read_reg(adap, TP_OUT_CONFIG_A);
9484 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
9487 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
9488 * shift positions of several elements of the Compressed Filter Tuple
9489 * for this adapter which we need frequently ...
9491 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
9492 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
9493 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
9494 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
9495 adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
9496 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
9498 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
9500 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
9502 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
9504 adap->params.tp.frag_shift = t4_filter_field_shift(adap,
9507 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
9508 * represents the presence of an Outer VLAN instead of a VNIC ID.
9510 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
9511 adap->params.tp.vnic_shift = -1;
9513 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
9514 adap->params.tp.hash_filter_mask = v;
9515 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
9516 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
9521 * t4_filter_field_shift - calculate filter field shift
9522 * @adap: the adapter
9523 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9525 * Return the shift position of a filter field within the Compressed
9526 * Filter Tuple. The filter field is specified via its selection bit
9527 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
9529 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9531 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9535 if ((filter_mode & filter_sel) == 0)
9538 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9539 switch (filter_mode & sel) {
9541 field_shift += FT_FCOE_W;
9544 field_shift += FT_PORT_W;
9547 field_shift += FT_VNIC_ID_W;
9550 field_shift += FT_VLAN_W;
9553 field_shift += FT_TOS_W;
9556 field_shift += FT_PROTOCOL_W;
9559 field_shift += FT_ETHERTYPE_W;
9562 field_shift += FT_MACMATCH_W;
9565 field_shift += FT_MPSHITTYPE_W;
9567 case FRAGMENTATION_F:
9568 field_shift += FT_FRAGMENTATION_W;
9575 int t4_init_rss_mode(struct adapter *adap, int mbox)
9578 struct fw_rss_vi_config_cmd rvc;
9580 memset(&rvc, 0, sizeof(rvc));
9582 for_each_port(adap, i) {
9583 struct port_info *p = adap2pinfo(adap, i);
9586 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
9587 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9588 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
9589 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
9590 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9593 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
9599 * t4_init_portinfo - allocate a virtual interface and initialize port_info
9600 * @pi: the port_info
9601 * @mbox: mailbox to use for the FW command
9602 * @port: physical port associated with the VI
9603 * @pf: the PF owning the VI
9604 * @vf: the VF owning the VI
9605 * @mac: the MAC address of the VI
9607 * Allocates a virtual interface for the given physical port. If @mac is
9608 * not %NULL it contains the MAC address of the VI as assigned by FW.
9609 * @mac should be large enough to hold an Ethernet address.
9610 * Returns < 0 on error.
9612 int t4_init_portinfo(struct port_info *pi, int mbox,
9613 int port, int pf, int vf, u8 mac[])
9615 struct adapter *adapter = pi->adapter;
9616 unsigned int fw_caps = adapter->params.fw_caps_support;
9617 struct fw_port_cmd cmd;
9618 unsigned int rss_size;
9619 enum fw_port_type port_type;
9621 fw_port_cap32_t pcaps, acaps;
9622 u8 vivld = 0, vin = 0;
9625 /* If we haven't yet determined whether we're talking to Firmware
9626 * which knows the new 32-bit Port Capabilities, it's time to find
9627 * out now. This will also tell new Firmware to send us Port Status
9628 * Updates using the new 32-bit Port Capabilities version of the
9629 * Port Information message.
9631 if (fw_caps == FW_CAPS_UNKNOWN) {
9634 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
9635 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
9637 ret = t4_set_params(adapter, mbox, pf, vf, 1, ¶m, &val);
9638 fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
9639 adapter->params.fw_caps_support = fw_caps;
9642 memset(&cmd, 0, sizeof(cmd));
9643 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
9644 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9645 FW_PORT_CMD_PORTID_V(port));
9646 cmd.action_to_len16 = cpu_to_be32(
9647 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
9648 ? FW_PORT_ACTION_GET_PORT_INFO
9649 : FW_PORT_ACTION_GET_PORT_INFO32) |
9651 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
9655 /* Extract the various fields from the Port Information message.
9657 if (fw_caps == FW_CAPS16) {
9658 u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
9660 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
9661 mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
9662 ? FW_PORT_CMD_MDIOADDR_G(lstatus)
9664 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
9665 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
9667 u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
9669 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
9670 mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
9671 ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
9673 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
9674 acaps = be32_to_cpu(cmd.u.info32.acaps32);
9677 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
9685 pi->rss_size = rss_size;
9686 pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);
9688 /* If fw supports returning the VIN as part of FW_VI_CMD,
9689 * save the returned values.
9691 if (adapter->params.viid_smt_extn_support) {
9695 /* Retrieve the values from VIID */
9696 pi->vivld = FW_VIID_VIVLD_G(pi->viid);
9697 pi->vin = FW_VIID_VIN_G(pi->viid);
9700 pi->port_type = port_type;
9701 pi->mdio_addr = mdio_addr;
9702 pi->mod_type = FW_PORT_MOD_TYPE_NA;
9704 init_link_config(&pi->link_cfg, pcaps, acaps);
9708 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9713 for_each_port(adap, i) {
9714 struct port_info *pi = adap2pinfo(adap, i);
9716 while ((adap->params.portvec & (1 << j)) == 0)
9719 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9723 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
9729 int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
9734 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
9746 * t4_read_cimq_cfg - read CIM queue configuration
9747 * @adap: the adapter
9748 * @base: holds the queue base addresses in bytes
9749 * @size: holds the queue sizes in bytes
9750 * @thres: holds the queue full thresholds in bytes
9752 * Returns the current configuration of the CIM queues, starting with
9753 * the IBQs, then the OBQs.
9755 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9758 int cim_num_obq = is_t4(adap->params.chip) ?
9759 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9761 for (i = 0; i < CIM_NUM_IBQ; i++) {
9762 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9764 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9765 /* value is in 256-byte units */
9766 *base++ = CIMQBASE_G(v) * 256;
9767 *size++ = CIMQSIZE_G(v) * 256;
9768 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
9770 for (i = 0; i < cim_num_obq; i++) {
9771 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9773 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9774 /* value is in 256-byte units */
9775 *base++ = CIMQBASE_G(v) * 256;
9776 *size++ = CIMQSIZE_G(v) * 256;
9781 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9782 * @adap: the adapter
9783 * @qid: the queue index
9784 * @data: where to store the queue contents
9785 * @n: capacity of @data in 32-bit words
9787 * Reads the contents of the selected CIM queue starting at address 0 up
9788 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9789 * error and the number of 32-bit words actually read on success.
9791 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9793 int i, err, attempts;
9795 const unsigned int nwords = CIM_IBQ_SIZE * 4;
9797 if (qid > 5 || (n & 3))
9800 addr = qid * nwords;
9804 /* It might take 3-10ms before the IBQ debug read access is allowed.
9805 * Wait for 1 Sec with a delay of 1 usec.
9809 for (i = 0; i < n; i++, addr++) {
9810 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
9812 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9816 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9818 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
9823 * t4_read_cim_obq - read the contents of a CIM outbound queue
9824 * @adap: the adapter
9825 * @qid: the queue index
9826 * @data: where to store the queue contents
9827 * @n: capacity of @data in 32-bit words
9829 * Reads the contents of the selected CIM queue starting at address 0 up
9830 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9831 * error and the number of 32-bit words actually read on success.
9833 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9836 unsigned int addr, v, nwords;
9837 int cim_num_obq = is_t4(adap->params.chip) ?
9838 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9840 if ((qid > (cim_num_obq - 1)) || (n & 3))
9843 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9844 QUENUMSELECT_V(qid));
9845 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9847 addr = CIMQBASE_G(v) * 64; /* muliple of 256 -> muliple of 4 */
9848 nwords = CIMQSIZE_G(v) * 64; /* same */
9852 for (i = 0; i < n; i++, addr++) {
9853 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9855 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9859 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9861 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
9866 * t4_cim_read - read a block from CIM internal address space
9867 * @adap: the adapter
9868 * @addr: the start address within the CIM address space
9869 * @n: number of words to read
9870 * @valp: where to store the result
9872 * Reads a block of 4-byte words from the CIM intenal address space.
9874 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9879 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9882 for ( ; !ret && n--; addr += 4) {
9883 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9884 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9887 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9893 * t4_cim_write - write a block into CIM internal address space
9894 * @adap: the adapter
9895 * @addr: the start address within the CIM address space
9896 * @n: number of words to write
9897 * @valp: set of values to write
9899 * Writes a block of 4-byte words into the CIM intenal address space.
9901 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9902 const unsigned int *valp)
9906 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9909 for ( ; !ret && n--; addr += 4) {
9910 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9911 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9912 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
/* Convenience wrapper: write a single word into CIM internal address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
9925 * t4_cim_read_la - read CIM LA capture buffer
9926 * @adap: the adapter
9927 * @la_buf: where to store the LA data
9928 * @wrptr: the HW write pointer within the capture buffer
9930 * Reads the contents of the CIM LA buffer with the most recent entry at
9931 * the end of the returned data and with the entry at @wrptr first.
9932 * We try to leave the LA in the running state we find it in.
9934 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9937 unsigned int cfg, val, idx;
9939 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
9943 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
9944 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
9949 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9953 idx = UPDBGLAWRPTR_G(val);
9957 for (i = 0; i < adap->params.cim_la_size; i++) {
9958 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9959 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
9962 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9965 if (val & UPDBGLARDEN_F) {
9969 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
9973 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9974 * identify the 32-bit portion of the full 312-bit data
9976 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
9977 idx = (idx & 0xff0) + 0x10;
9980 /* address can't exceed 0xfff */
9981 idx &= UPDBGLARDPTR_M;
9984 if (cfg & UPDBGLAEN_F) {
9985 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9986 cfg & ~UPDBGLARDEN_F);
9994 * t4_tp_read_la - read TP LA capture buffer
9995 * @adap: the adapter
9996 * @la_buf: where to store the LA data
9997 * @wrptr: the HW write pointer within the capture buffer
9999 * Reads the contents of the TP LA buffer with the most recent entry at
10000 * the end of the returned data and with the entry at @wrptr first.
10001 * We leave the LA in the running state we find it in.
10003 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
10005 bool last_incomplete;
10006 unsigned int i, cfg, val, idx;
10008 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
10009 if (cfg & DBGLAENABLE_F) /* freeze LA */
10010 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
10011 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
10013 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
10014 idx = DBGLAWPTR_G(val);
10015 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
10016 if (last_incomplete)
10017 idx = (idx + 1) & DBGLARPTR_M;
10022 val &= ~DBGLARPTR_V(DBGLARPTR_M);
10023 val |= adap->params.tp.la_mask;
10025 for (i = 0; i < TPLA_SIZE; i++) {
10026 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
10027 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
10028 idx = (idx + 1) & DBGLARPTR_M;
10031 /* Wipe out last entry if it isn't valid */
10032 if (last_incomplete)
10033 la_buf[TPLA_SIZE - 1] = ~0ULL;
10035 if (cfg & DBGLAENABLE_F) /* restore running state */
10036 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
10037 cfg | adap->params.tp.la_mask);
10040 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
10041 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
10042 * state for more than the Warning Threshold then we'll issue a warning about
10043 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
10044 * appears to be hung every Warning Repeat second till the situation clears.
10045 * If the situation clears, we'll note that as well.
10047 #define SGE_IDMA_WARN_THRESH 1
10048 #define SGE_IDMA_WARN_REPEAT 300
10051 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10052 * @adapter: the adapter
10053 * @idma: the adapter IDMA Monitor state
10055 * Initialize the state of an SGE Ingress DMA Monitor.
10057 void t4_idma_monitor_init(struct adapter *adapter,
10058 struct sge_idma_monitor_state *idma)
10060 /* Initialize the state variables for detecting an SGE Ingress DMA
10061 * hang. The SGE has internal counters which count up on each clock
10062 * tick whenever the SGE finds its Ingress DMA State Engines in the
10063 * same state they were on the previous clock tick. The clock used is
10064 * the Core Clock so we have a limit on the maximum "time" they can
10065 * record; typically a very small number of seconds. For instance,
10066 * with a 600MHz Core Clock, we can only count up to a bit more than
10067 * 7s. So we'll synthesize a larger counter in order to not run the
10068 * risk of having the "timers" overflow and give us the flexibility to
10069 * maintain a Hung SGE State Machine of our own which operates across
10070 * a longer time frame.
10072 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10073 idma->idma_stalled[0] = 0;
10074 idma->idma_stalled[1] = 0;
10078 * t4_idma_monitor - monitor SGE Ingress DMA state
10079 * @adapter: the adapter
10080 * @idma: the adapter IDMA Monitor state
10081 * @hz: number of ticks/second
10082 * @ticks: number of ticks since the last IDMA Monitor call
10084 void t4_idma_monitor(struct adapter *adapter,
10085 struct sge_idma_monitor_state *idma,
10088 int i, idma_same_state_cnt[2];
10090 /* Read the SGE Debug Ingress DMA Same State Count registers. These
10091 * are counters inside the SGE which count up on each clock when the
10092 * SGE finds its Ingress DMA State Engines in the same states they
10093 * were in the previous clock. The counters will peg out at
10094 * 0xffffffff without wrapping around so once they pass the 1s
10095 * threshold they'll stay above that till the IDMA state changes.
10097 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
10098 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
10099 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10101 for (i = 0; i < 2; i++) {
10102 u32 debug0, debug11;
10104 /* If the Ingress DMA Same State Counter ("timer") is less
10105 * than 1s, then we can reset our synthesized Stall Timer and
10106 * continue. If we have previously emitted warnings about a
10107 * potential stalled Ingress Queue, issue a note indicating
10108 * that the Ingress Queue has resumed forward progress.
10110 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
10111 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
10112 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
10113 "resumed after %d seconds\n",
10114 i, idma->idma_qid[i],
10115 idma->idma_stalled[i] / hz);
10116 idma->idma_stalled[i] = 0;
10120 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
10121 * domain. The first time we get here it'll be because we
10122 * passed the 1s Threshold; each additional time it'll be
10123 * because the RX Timer Callback is being fired on its regular
10126 * If the stall is below our Potential Hung Ingress Queue
10127 * Warning Threshold, continue.
10129 if (idma->idma_stalled[i] == 0) {
10130 idma->idma_stalled[i] = hz;
10131 idma->idma_warn[i] = 0;
10133 idma->idma_stalled[i] += ticks;
10134 idma->idma_warn[i] -= ticks;
10137 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
10140 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
10142 if (idma->idma_warn[i] > 0)
10144 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
10146 /* Read and save the SGE IDMA State and Queue ID information.
10147 * We do this every time in case it changes across time ...
10148 * can't be too careful ...
10150 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
10151 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10152 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
10154 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
10155 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10156 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
10158 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
10159 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
10160 i, idma->idma_qid[i], idma->idma_state[i],
10161 idma->idma_stalled[i] / hz,
10163 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
10168 * t4_load_cfg - download config file
10169 * @adap: the adapter
10170 * @cfg_data: the cfg text file to write
10171 * @size: text file size
10173 * Write the supplied config text file to the card's serial flash.
10175 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10177 int ret, i, n, cfg_addr;
10179 unsigned int flash_cfg_start_sec;
10180 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10182 cfg_addr = t4_flash_cfg_addr(adap);
10187 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10189 if (size > FLASH_CFG_MAX_SIZE) {
10190 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
10191 FLASH_CFG_MAX_SIZE);
10195 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
10197 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10198 flash_cfg_start_sec + i - 1);
10199 /* If size == 0 then we're simply erasing the FLASH sectors associated
10200 * with the on-adapter Firmware Configuration File.
10202 if (ret || size == 0)
10205 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10206 for (i = 0; i < size; i += SF_PAGE_SIZE) {
10207 if ((size - i) < SF_PAGE_SIZE)
10211 ret = t4_write_flash(adap, addr, n, cfg_data);
10215 addr += SF_PAGE_SIZE;
10216 cfg_data += SF_PAGE_SIZE;
10221 dev_err(adap->pdev_dev, "config file %s failed %d\n",
10222 (size == 0 ? "clear" : "download"), ret);
10227 * t4_set_vf_mac - Set MAC address for the specified VF
10228 * @adapter: The adapter
10229 * @vf: one of the VFs instantiated by the specified PF
10230 * @naddr: the number of MAC addresses
10231 * @addr: the MAC address(es) to be set to the specified VF
10233 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
10234 unsigned int naddr, u8 *addr)
10236 struct fw_acl_mac_cmd cmd;
10238 memset(&cmd, 0, sizeof(cmd));
10239 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
10242 FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
10243 FW_ACL_MAC_CMD_VFN_V(vf));
10245 /* Note: Do not enable the ACL */
10246 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
10249 switch (adapter->pf) {
10251 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
10254 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
10257 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
10260 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
10264 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
10268 * t4_read_pace_tbl - read the pace table
10269 * @adap: the adapter
10270 * @pace_vals: holds the returned values
10272 * Returns the values of TP's pace table in microseconds.
10274 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10278 for (i = 0; i < NTX_SCHED; i++) {
10279 t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
10280 v = t4_read_reg(adap, TP_PACE_TABLE_A);
10281 pace_vals[i] = dack_ticks_to_usec(adap, v);
10286 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10287 * @adap: the adapter
10288 * @sched: the scheduler index
10289 * @kbps: the byte rate in Kbps
10290 * @ipg: the interpacket delay in tenths of nanoseconds
10291 * @sleep_ok: if true we may sleep while awaiting command completion
10293 * Return the current configuration of a HW Tx scheduler.
10295 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
10296 unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
10298 unsigned int v, addr, bpt, cpt;
10301 addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
10302 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10305 bpt = (v >> 8) & 0xff;
10308 *kbps = 0; /* scheduler disabled */
10310 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
10311 *kbps = (v * bpt) / 125;
10315 addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
10316 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10320 *ipg = (10000 * v) / core_ticks_per_usec(adap);
10324 /* t4_sge_ctxt_rd - read an SGE context through FW
10325 * @adap: the adapter
10326 * @mbox: mailbox to use for the FW command
10327 * @cid: the context id
10328 * @ctype: the context type
10329 * @data: where to store the context data
10331 * Issues a FW command through the given mailbox to read an SGE context.
10333 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10334 enum ctxt_type ctype, u32 *data)
10336 struct fw_ldst_cmd c;
10339 if (ctype == CTXT_FLM)
10340 ret = FW_LDST_ADDRSPC_SGE_FLMC;
10342 ret = FW_LDST_ADDRSPC_SGE_CONMC;
10344 memset(&c, 0, sizeof(c));
10345 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10346 FW_CMD_REQUEST_F | FW_CMD_READ_F |
10347 FW_LDST_CMD_ADDRSPACE_V(ret));
10348 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10349 c.u.idctxt.physid = cpu_to_be32(cid);
10351 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10353 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10354 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10355 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10356 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10357 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10358 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10364 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10365 * @adap: the adapter
10366 * @cid: the context id
10367 * @ctype: the context type
10368 * @data: where to store the context data
10370 * Reads an SGE context directly, bypassing FW. This is only for
10371 * debugging when FW is unavailable.
10373 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
10374 enum ctxt_type ctype, u32 *data)
10378 t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
10379 ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
10381 for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
10382 *data++ = t4_read_reg(adap, i);
10386 int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
10387 u8 rateunit, u8 ratemode, u8 channel, u8 class,
10388 u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
10391 struct fw_sched_cmd cmd;
10393 memset(&cmd, 0, sizeof(cmd));
10394 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
10397 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10399 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10400 cmd.u.params.type = type;
10401 cmd.u.params.level = level;
10402 cmd.u.params.mode = mode;
10403 cmd.u.params.ch = channel;
10404 cmd.u.params.cl = class;
10405 cmd.u.params.unit = rateunit;
10406 cmd.u.params.rate = ratemode;
10407 cmd.u.params.min = cpu_to_be32(minrate);
10408 cmd.u.params.max = cpu_to_be32(maxrate);
10409 cmd.u.params.weight = cpu_to_be16(weight);
10410 cmd.u.params.pktsize = cpu_to_be16(pktsize);
10411 cmd.u.params.burstsize = cpu_to_be16(burstsize);
10413 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
10418 * t4_i2c_rd - read I2C data from adapter
10419 * @adap: the adapter
10420 * @mbox: mailbox to use for the FW command
10421 * @port: Port number if per-port device; <0 if not
10422 * @devid: per-port device ID or absolute device ID
10423 * @offset: byte offset into device I2C space
10424 * @len: byte length of I2C space data
10425 * @buf: buffer in which to return I2C data
10427 * Reads the I2C data from the indicated device and location.
10429 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
10430 unsigned int devid, unsigned int offset,
10431 unsigned int len, u8 *buf)
10433 struct fw_ldst_cmd ldst_cmd, ldst_rpl;
10434 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
10437 if (len > I2C_PAGE_SIZE)
10440 /* Dont allow reads that spans multiple pages */
10441 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
10444 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10445 ldst_cmd.op_to_addrspace =
10446 cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10449 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
10450 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
10451 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
10452 ldst_cmd.u.i2c.did = devid;
10455 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
10457 ldst_cmd.u.i2c.boffset = offset;
10458 ldst_cmd.u.i2c.blen = i2c_len;
10460 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
10465 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
10475 * t4_set_vlan_acl - Set a VLAN id for the specified VF
10476 * @adap: the adapter
10477 * @mbox: mailbox to use for the FW command
10478 * @vf: one of the VFs instantiated by the specified PF
10479 * @vlan: The vlanid to be set
10481 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
10484 struct fw_acl_vlan_cmd vlan_cmd;
10485 unsigned int enable;
10487 enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
10488 memset(&vlan_cmd, 0, sizeof(vlan_cmd));
10489 vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
10493 FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
10494 FW_ACL_VLAN_CMD_VFN_V(vf));
10495 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
10496 /* Drop all packets that donot match vlan id */
10497 vlan_cmd.dropnovlan_fm = (enable
10498 ? (FW_ACL_VLAN_CMD_DROPNOVLAN_F |
10499 FW_ACL_VLAN_CMD_FM_F) : 0);
10501 vlan_cmd.nvlan = 1;
10502 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
10505 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
10509 * modify_device_id - Modifies the device ID of the Boot BIOS image
10510 * @device_id: the device ID to write.
10511 * @boot_data: the boot image to modify.
10513 * Write the supplied device ID to the boot BIOS image.
10515 static void modify_device_id(int device_id, u8 *boot_data)
10517 struct cxgb4_pcir_data *pcir_header;
10518 struct legacy_pci_rom_hdr *header;
10519 u8 *cur_header = boot_data;
10522 /* Loop through all chained images and change the device ID's */
10524 header = (struct legacy_pci_rom_hdr *)cur_header;
10525 pcir_offset = le16_to_cpu(header->pcir_offset);
10526 pcir_header = (struct cxgb4_pcir_data *)(cur_header +
10530 * Only modify the Device ID if code type is Legacy or HP.
10531 * 0x00: Okay to modify
10532 * 0x01: FCODE. Do not modify
10533 * 0x03: Okay to modify
10534 * 0x04-0xFF: Do not modify
10536 if (pcir_header->code_type == CXGB4_HDR_CODE1) {
10541 * Modify Device ID to match current adatper
10543 pcir_header->device_id = cpu_to_le16(device_id);
10546 * Set checksum temporarily to 0.
10547 * We will recalculate it later.
10549 header->cksum = 0x0;
10552 * Calculate and update checksum
10554 for (i = 0; i < (header->size512 * 512); i++)
10555 csum += cur_header[i];
10558 * Invert summed value to create the checksum
10559 * Writing new checksum value directly to the boot data
10561 cur_header[7] = -csum;
10563 } else if (pcir_header->code_type == CXGB4_HDR_CODE2) {
10565 * Modify Device ID to match current adatper
10567 pcir_header->device_id = cpu_to_le16(device_id);
10571 * Move header pointer up to the next image in the ROM.
10573 cur_header += header->size512 * 512;
10574 } while (!(pcir_header->indicator & CXGB4_HDR_INDI));
10578 * t4_load_boot - download boot flash
10579 * @adap: the adapter
10580 * @boot_data: the boot image to write
10581 * @boot_addr: offset in flash to write boot_data
10582 * @size: image size
10584 * Write the supplied boot image to the card's serial flash.
10585 * The boot image has the following sections: a 28-byte header and the
10588 int t4_load_boot(struct adapter *adap, u8 *boot_data,
10589 unsigned int boot_addr, unsigned int size)
10591 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10592 unsigned int boot_sector = (boot_addr * 1024);
10593 struct cxgb4_pci_exp_rom_header *header;
10594 struct cxgb4_pcir_data *pcir_header;
10601 * Make sure the boot image does not encroach on the firmware region
10603 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10604 dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n");
10608 /* Get boot header */
10609 header = (struct cxgb4_pci_exp_rom_header *)boot_data;
10610 pcir_offset = le16_to_cpu(header->pcir_offset);
10611 /* PCIR Data Structure */
10612 pcir_header = (struct cxgb4_pcir_data *)&boot_data[pcir_offset];
10615 * Perform some primitive sanity testing to avoid accidentally
10616 * writing garbage over the boot sectors. We ought to check for
10617 * more but it's not worth it for now ...
10619 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10620 dev_err(adap->pdev_dev, "boot image too small/large\n");
10624 if (le16_to_cpu(header->signature) != BOOT_SIGNATURE) {
10625 dev_err(adap->pdev_dev, "Boot image missing signature\n");
10629 /* Check PCI header signature */
10630 if (le32_to_cpu(pcir_header->signature) != PCIR_SIGNATURE) {
10631 dev_err(adap->pdev_dev, "PCI header missing signature\n");
10635 /* Check Vendor ID matches Chelsio ID*/
10636 if (le16_to_cpu(pcir_header->vendor_id) != PCI_VENDOR_ID_CHELSIO) {
10637 dev_err(adap->pdev_dev, "Vendor ID missing signature\n");
10642 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10643 * and Boot configuration data sections. These 3 boot sections span
10644 * sectors 0 to 7 in flash and live right before the FW image location.
10646 i = DIV_ROUND_UP(size ? size : FLASH_FW_START, sf_sec_size);
10647 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10648 (boot_sector >> 16) + i - 1);
10651 * If size == 0 then we're simply erasing the FLASH sectors associated
10652 * with the on-adapter option ROM file
10654 if (ret || size == 0)
10656 /* Retrieve adapter's device ID */
10657 pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id);
10658 /* Want to deal with PF 0 so I strip off PF 4 indicator */
10659 device_id = device_id & 0xf0ff;
10661 /* Check PCIE Device ID */
10662 if (le16_to_cpu(pcir_header->device_id) != device_id) {
10664 * Change the device ID in the Boot BIOS image to match
10665 * the Device ID of the current adapter.
10667 modify_device_id(device_id, boot_data);
10671 * Skip over the first SF_PAGE_SIZE worth of data and write it after
10672 * we finish copying the rest of the boot image. This will ensure
10673 * that the BIOS boot header will only be written if the boot image
10674 * was written in full.
10676 addr = boot_sector;
10677 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10678 addr += SF_PAGE_SIZE;
10679 boot_data += SF_PAGE_SIZE;
10680 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
10685 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10686 (const u8 *)header);
10690 dev_err(adap->pdev_dev, "boot image load failed, error %d\n",
10696 * t4_flash_bootcfg_addr - return the address of the flash
10697 * optionrom configuration
10698 * @adapter: the adapter
10700 * Return the address within the flash where the OptionROM Configuration
10701 * is stored, or an error if the device FLASH is too small to contain
10702 * a OptionROM Configuration.
10704 static int t4_flash_bootcfg_addr(struct adapter *adapter)
10707 * If the device FLASH isn't large enough to hold a Firmware
10708 * Configuration File, return an error.
10710 if (adapter->params.sf_size <
10711 FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10714 return FLASH_BOOTCFG_START;
10717 int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10719 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10720 struct cxgb4_bootcfg_data *header;
10721 unsigned int flash_cfg_start_sec;
10722 unsigned int addr, npad;
10723 int ret, i, n, cfg_addr;
10725 cfg_addr = t4_flash_bootcfg_addr(adap);
10730 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10732 if (size > FLASH_BOOTCFG_MAX_SIZE) {
10733 dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n",
10734 FLASH_BOOTCFG_MAX_SIZE);
10738 header = (struct cxgb4_bootcfg_data *)cfg_data;
10739 if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) {
10740 dev_err(adap->pdev_dev, "Wrong bootcfg signature\n");
10745 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,
10747 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10748 flash_cfg_start_sec + i - 1);
10751 * If size == 0 then we're simply erasing the FLASH sectors associated
10752 * with the on-adapter OptionROM Configuration File.
10754 if (ret || size == 0)
10757 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10758 for (i = 0; i < size; i += SF_PAGE_SIZE) {
10759 n = min_t(u32, size - i, SF_PAGE_SIZE);
10761 ret = t4_write_flash(adap, addr, n, cfg_data);
10765 addr += SF_PAGE_SIZE;
10766 cfg_data += SF_PAGE_SIZE;
10769 npad = ((size + 4 - 1) & ~3) - size;
10770 for (i = 0; i < npad; i++) {
10773 ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
10780 dev_err(adap->pdev_dev, "boot config data %s failed %d\n",
10781 (size == 0 ? "clear" : "download"), ret);