1 // SPDX-License-Identifier: GPL-2.0-only
/*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
 */
/* ISP24xx register block for this host; all windowed I/O below goes through it. */
9 #define ISPREG(vha) (&(vha)->hw->iobase->isp24)
/* Byte offset of the iobase_addr (window-select) register within a register block. */
10 #define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr)
11 #define IOBASE(vha) IOBAR(ISPREG(vha))
/* Sentinel returned by entry handlers to abort the template walk (see t262). */
12 #define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
14 /* hardware_lock assumed held. */
16 qla27xx_write_remote_reg(struct scsi_qla_host *vha,
19 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
21 ql_dbg(ql_dbg_misc, vha, 0xd300,
22 "%s: addr/data = %xh/%xh\n", __func__, addr, data);
24 wrt_reg_dword(®->iobase_addr, 0x40);
25 wrt_reg_dword(®->iobase_c4, data);
26 wrt_reg_dword(®->iobase_window, addr);
/* Reset the MPI firmware via two remote register writes and count the event. */
30 qla27xx_reset_mpi(scsi_qla_host_t *vha)
32 ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd301,
33 "Entered %s.\n", __func__);
/* Magic address/value pairs for the MPI reset sequence — TODO confirm vs. firmware spec. */
35 qla27xx_write_remote_reg(vha, 0x104050, 0x40004);
36 qla27xx_write_remote_reg(vha, 0x10405c, 0x4);
/* Statistics: remember how often the MPI was reset. */
38 vha->hw->stat.num_mpi_reset++;
/*
 * qla27xx_insert16/qla27xx_insert32 - append a little-endian value to the
 * dump buffer and advance *len.  *len always grows, so a NULL buf can be
 * used for a pure sizing pass (the buf guard lines are not visible in
 * this extract — TODO confirm against the full source).
 */
42 qla27xx_insert16(uint16_t value, void *buf, ulong *len)
46 *(__le16 *)buf = cpu_to_le16(value);
48 *len += sizeof(value);
52 qla27xx_insert32(uint32_t value, void *buf, ulong *len)
56 *(__le32 *)buf = cpu_to_le32(value);
58 *len += sizeof(value);
/* Copy an arbitrary memory region into the dump buffer; no-op when any of
 * buf/mem/size is zero.  Presumably also advances *len — the line is
 * missing from this extract, verify against the full source. */
62 qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
64 if (buf && mem && size) {
66 memcpy(buf, mem, size);
/* Read one register of the given width and store it as a 32-bit dump word. */
72 qla27xx_read8(void __iomem *window, void *buf, ulong *len)
77 value = rd_reg_byte(window);
79 qla27xx_insert32(value, buf, len);
83 qla27xx_read16(void __iomem *window, void *buf, ulong *len)
88 value = rd_reg_word(window);
90 qla27xx_insert32(value, buf, len);
94 qla27xx_read32(void __iomem *window, void *buf, ulong *len)
99 value = rd_reg_dword(window);
101 qla27xx_insert32(value, buf, len);
/* Map a template-specified register width to a reader: 1 -> byte, 2 -> word,
 * anything else presumably dword (the final branch is not visible here). */
104 static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
107 (width == 1) ? qla27xx_read8 :
108 (width == 2) ? qla27xx_read16 :
/* Read the dword register at 'offset' within the mapped block into the dump. */
113 qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
114 uint offset, void *buf, ulong *len)
116 void __iomem *window = (void __iomem *)reg + offset;
118 qla27xx_read32(window, buf, len);
/* Write 'data' to the register at 'offset'.  NOTE(review): upstream guards
 * the hardware write with "if (buf)" so sizing passes do no I/O; that guard
 * line is not visible in this extract — confirm before relying on it. */
122 qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
123 uint offset, uint32_t data, void *buf)
126 void __iomem *window = (void __iomem *)reg + offset;
128 wrt_reg_dword(window, data);
/* Select a register window via iobase_addr, record the window address in the
 * dump, then read 'count' registers of 'width' bytes through the window. */
133 qla27xx_read_window(__iomem struct device_reg_24xx *reg,
134 uint32_t addr, uint offset, uint count, uint width, void *buf,
137 void __iomem *window = (void __iomem *)reg + offset;
138 void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);
140 qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
142 qla27xx_insert32(addr, buf, len);
143 readn(window, buf, len);
/* Flag a template entry as skipped so post-processing can drop it. */
150 qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
153 ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
/* Advance to the next entry using the byte size stored in this entry's header. */
156 static inline struct qla27xx_fwdt_entry *
157 qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
159 return (void *)ent + le32_to_cpu(ent->hdr.size);
/* T0: no-op entry — mark skipped and continue. */
162 static struct qla27xx_fwdt_entry *
163 qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
164 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
166 ql_dbg(ql_dbg_misc, vha, 0xd100,
167 "%s: nop [%lx]\n", __func__, *len);
168 qla27xx_skip_entry(ent, buf);
170 return qla27xx_next_entry(ent);
/* T255: end-of-template marker; presumably returns NULL to stop the walk
 * (the return line is not visible in this extract). */
173 static struct qla27xx_fwdt_entry *
174 qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
175 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
177 ql_dbg(ql_dbg_misc, vha, 0xd1ff,
178 "%s: end [%lx]\n", __func__, *len);
179 qla27xx_skip_entry(ent, buf);
/* T256: type-1 windowed register read — dump 'count' registers of 'width'
 * bytes starting at base_addr through the window at pci_offset. */
185 static struct qla27xx_fwdt_entry *
186 qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
187 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
189 ulong addr = le32_to_cpu(ent->t256.base_addr);
190 uint offset = ent->t256.pci_offset;
191 ulong count = le16_to_cpu(ent->t256.reg_count);
192 uint width = ent->t256.reg_width;
194 ql_dbg(ql_dbg_misc, vha, 0xd200,
195 "%s: rdio t1 [%lx]\n", __func__, *len);
196 qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);
198 return qla27xx_next_entry(ent);
/* T257: type-1 windowed register write — select window base_addr, then
 * write write_data to the register at pci_offset. */
201 static struct qla27xx_fwdt_entry *
202 qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
203 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
205 ulong addr = le32_to_cpu(ent->t257.base_addr);
206 uint offset = ent->t257.pci_offset;
207 ulong data = le32_to_cpu(ent->t257.write_data);
209 ql_dbg(ql_dbg_misc, vha, 0xd201,
210 "%s: wrio t1 [%lx]\n", __func__, *len);
211 qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
212 qla27xx_write_reg(ISPREG(vha), offset, data, buf);
214 return qla27xx_next_entry(ent);
/* T258: type-2 windowed read — like T256, but first select a bank via the
 * banksel register. */
217 static struct qla27xx_fwdt_entry *
218 qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
219 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
221 uint banksel = ent->t258.banksel_offset;
222 ulong bank = le32_to_cpu(ent->t258.bank);
223 ulong addr = le32_to_cpu(ent->t258.base_addr);
224 uint offset = ent->t258.pci_offset;
225 uint count = le16_to_cpu(ent->t258.reg_count);
226 uint width = ent->t258.reg_width;
228 ql_dbg(ql_dbg_misc, vha, 0xd202,
229 "%s: rdio t2 [%lx]\n", __func__, *len);
230 qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
231 qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);
233 return qla27xx_next_entry(ent);
/* T259: type-2 windowed write — select window and bank, then write data. */
236 static struct qla27xx_fwdt_entry *
237 qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
238 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
240 ulong addr = le32_to_cpu(ent->t259.base_addr);
241 uint banksel = ent->t259.banksel_offset;
242 ulong bank = le32_to_cpu(ent->t259.bank);
243 uint offset = ent->t259.pci_offset;
244 ulong data = le32_to_cpu(ent->t259.write_data);
246 ql_dbg(ql_dbg_misc, vha, 0xd203,
247 "%s: wrio t2 [%lx]\n", __func__, *len);
248 qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
249 qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
250 qla27xx_write_reg(ISPREG(vha), offset, data, buf);
252 return qla27xx_next_entry(ent);
/* T260: dump one PCI register — record the offset then its value. */
255 static struct qla27xx_fwdt_entry *
256 qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
257 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
259 uint offset = ent->t260.pci_offset;
261 ql_dbg(ql_dbg_misc, vha, 0xd204,
262 "%s: rdpci [%lx]\n", __func__, *len);
263 qla27xx_insert32(offset, buf, len);
264 qla27xx_read_reg(ISPREG(vha), offset, buf, len);
266 return qla27xx_next_entry(ent);
/* T261: write one PCI register with the template-supplied value. */
269 static struct qla27xx_fwdt_entry *
270 qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
271 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
273 uint offset = ent->t261.pci_offset;
274 ulong data = le32_to_cpu(ent->t261.write_data);
276 ql_dbg(ql_dbg_misc, vha, 0xd205,
277 "%s: wrpci [%lx]\n", __func__, *len);
278 qla27xx_write_reg(ISPREG(vha), offset, data, buf);
280 return qla27xx_next_entry(ent);
/*
 * T262: dump a firmware RAM region.  The [start, end] boundaries come from
 * the template but are overridden per ram_area from live hw state, and the
 * chosen boundaries are written back into the entry so the consumer of the
 * dump sees what was actually captured.
 */
283 static struct qla27xx_fwdt_entry *
284 qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
285 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
287 uint area = ent->t262.ram_area;
288 ulong start = le32_to_cpu(ent->t262.start_addr);
289 ulong end = le32_to_cpu(ent->t262.end_addr);
293 ql_dbg(ql_dbg_misc, vha, 0xd206,
294 "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
/* Critical RAM: template boundaries used as-is. */
296 if (area == T262_RAM_AREA_CRITICAL_RAM) {
298 } else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
299 end = vha->hw->fw_memory_size;
301 ent->t262.end_addr = cpu_to_le32(end);
302 } else if (area == T262_RAM_AREA_SHARED_RAM) {
303 start = vha->hw->fw_shared_ram_start;
304 end = vha->hw->fw_shared_ram_end;
306 ent->t262.start_addr = cpu_to_le32(start);
307 ent->t262.end_addr = cpu_to_le32(end);
309 } else if (area == T262_RAM_AREA_DDR_RAM) {
310 start = vha->hw->fw_ddr_ram_start;
311 end = vha->hw->fw_ddr_ram_end;
313 ent->t262.start_addr = cpu_to_le32(start);
314 ent->t262.end_addr = cpu_to_le32(end);
316 } else if (area == T262_RAM_AREA_MISC) {
318 ent->t262.start_addr = cpu_to_le32(start);
319 ent->t262.end_addr = cpu_to_le32(end);
322 ql_dbg(ql_dbg_misc, vha, 0xd022,
323 "%s: unknown area %x\n", __func__, area);
324 qla27xx_skip_entry(ent, buf);
/* Guard against inverted or zeroed ranges before issuing the mailbox dump. */
328 if (end < start || start == 0 || end == 0) {
329 ql_dbg(ql_dbg_misc, vha, 0xd023,
330 "%s: unusable range (start=%lx end=%lx)\n",
331 __func__, start, end);
332 qla27xx_skip_entry(ent, buf);
/* Range is inclusive, hence +1. */
336 dwords = end - start + 1;
339 rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
340 if (rc != QLA_SUCCESS) {
341 ql_dbg(ql_dbg_async, vha, 0xffff,
342 "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
343 __func__, area, start, end);
/* Abort the whole template walk — see qla27xx_walk_template(). */
344 return INVALID_ENTRY;
347 *len += dwords * sizeof(uint32_t);
349 return qla27xx_next_entry(ent);
/*
 * T263: capture request, response, or ATIO queues.  For each queue an
 * (index, length) header is inserted followed by the ring contents; the
 * number of captured queues is written back into the entry.
 */
352 static struct qla27xx_fwdt_entry *
353 qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
354 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
356 uint type = ent->t263.queue_type;
361 ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
362 "%s: getq(%x) [%lx]\n", __func__, type, *len);
363 if (type == T263_QUEUE_TYPE_REQ) {
364 for (i = 0; i < vha->hw->max_req_queues; i++) {
365 struct req_que *req = vha->hw->req_q_map[i];
/* Fall back to the default entry count when the queue has no length. */
369 req->length : REQUEST_ENTRY_CNT_24XX;
370 qla27xx_insert16(i, buf, len);
371 qla27xx_insert16(length, buf, len);
372 qla27xx_insertbuf(req ? req->ring : NULL,
373 length * sizeof(*req->ring), buf, len);
377 } else if (type == T263_QUEUE_TYPE_RSP) {
378 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
379 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
383 rsp->length : RESPONSE_ENTRY_CNT_MQ;
384 qla27xx_insert16(i, buf, len);
385 qla27xx_insert16(length, buf, len);
386 qla27xx_insertbuf(rsp ? rsp->ring : NULL,
387 length * sizeof(*rsp->ring), buf, len);
/* ATIO queue exists only when target mode is enabled. */
391 } else if (QLA_TGT_MODE_ENABLED() &&
392 ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
393 struct qla_hw_data *ha = vha->hw;
394 struct atio *atr = ha->tgt.atio_ring;
397 length = ha->tgt.atio_q_length;
398 qla27xx_insert16(0, buf, len);
399 qla27xx_insert16(length, buf, len);
400 qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
404 ql_dbg(ql_dbg_misc, vha, 0xd026,
405 "%s: unknown queue %x\n", __func__, type);
406 qla27xx_skip_entry(ent, buf);
/* Record how many queues were actually dumped (0 -> skip the entry). */
411 ent->t263.num_queues = count;
413 qla27xx_skip_entry(ent, buf);
416 return qla27xx_next_entry(ent);
/* T264: capture the FCE trace buffer plus the mailbox values that enabled
 * it; skipped when no FCE buffer was allocated. */
419 static struct qla27xx_fwdt_entry *
420 qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
421 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
423 ql_dbg(ql_dbg_misc, vha, 0xd208,
424 "%s: getfce [%lx]\n", __func__, *len);
427 ent->t264.fce_trace_size = FCE_SIZE;
428 ent->t264.write_pointer = vha->hw->fce_wr;
429 ent->t264.base_pointer = vha->hw->fce_dma;
430 ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
431 ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
432 ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
433 ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
434 ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
435 ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
437 qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
439 ql_dbg(ql_dbg_misc, vha, 0xd027,
440 "%s: missing fce\n", __func__);
441 qla27xx_skip_entry(ent, buf);
444 return qla27xx_next_entry(ent);
/* T265: pause the RISC processor before register capture. */
447 static struct qla27xx_fwdt_entry *
448 qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
449 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
451 ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
452 "%s: pause risc [%lx]\n", __func__, *len);
454 qla24xx_pause_risc(ISPREG(vha), vha->hw);
456 return qla27xx_next_entry(ent);
/* T266: soft-reset the RISC; a failure is unexpected, hence WARN_ON_ONCE. */
459 static struct qla27xx_fwdt_entry *
460 qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
461 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
463 ql_dbg(ql_dbg_misc, vha, 0xd20a,
464 "%s: reset risc [%lx]\n", __func__, *len);
466 WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
468 return qla27xx_next_entry(ent);
/* T267: disable interrupts by writing a template-supplied register value. */
471 static struct qla27xx_fwdt_entry *
472 qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
473 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
475 uint offset = ent->t267.pci_offset;
476 ulong data = le32_to_cpu(ent->t267.data);
478 ql_dbg(ql_dbg_misc, vha, 0xd20b,
479 "%s: dis intr [%lx]\n", __func__, *len);
480 qla27xx_write_reg(ISPREG(vha), offset, data, buf);
482 return qla27xx_next_entry(ent);
/*
 * T268: capture a host buffer selected by buf_type — extended trace (EFT),
 * exchange-offload, or extended-login buffers.  Entries for missing buffers
 * and unimplemented mirror types are marked skipped.
 */
485 static struct qla27xx_fwdt_entry *
486 qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
487 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
489 ql_dbg(ql_dbg_misc, vha, 0xd20c,
490 "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
491 switch (ent->t268.buf_type) {
492 case T268_BUF_TYPE_EXTD_TRACE:
495 ent->t268.buf_size = EFT_SIZE;
496 ent->t268.start_addr = vha->hw->eft_dma;
498 qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
500 ql_dbg(ql_dbg_misc, vha, 0xd028,
501 "%s: missing eft\n", __func__);
502 qla27xx_skip_entry(ent, buf);
505 case T268_BUF_TYPE_EXCH_BUFOFF:
506 if (vha->hw->exchoffld_buf) {
508 ent->t268.buf_size = vha->hw->exchoffld_size;
509 ent->t268.start_addr =
510 vha->hw->exchoffld_buf_dma;
512 qla27xx_insertbuf(vha->hw->exchoffld_buf,
513 vha->hw->exchoffld_size, buf, len);
515 ql_dbg(ql_dbg_misc, vha, 0xd028,
516 "%s: missing exch offld\n", __func__);
517 qla27xx_skip_entry(ent, buf);
520 case T268_BUF_TYPE_EXTD_LOGIN:
521 if (vha->hw->exlogin_buf) {
523 ent->t268.buf_size = vha->hw->exlogin_size;
524 ent->t268.start_addr =
525 vha->hw->exlogin_buf_dma;
527 qla27xx_insertbuf(vha->hw->exlogin_buf,
528 vha->hw->exlogin_size, buf, len);
530 ql_dbg(ql_dbg_misc, vha, 0xd028,
531 "%s: missing ext login\n", __func__);
532 qla27xx_skip_entry(ent, buf);
536 case T268_BUF_TYPE_REQ_MIRROR:
537 case T268_BUF_TYPE_RSP_MIRROR:
539 * Mirror pointers are not implemented in the
540 * driver, instead shadow pointers are used by
541 * the driver. Skip these entries.
543 qla27xx_skip_entry(ent, buf);
546 ql_dbg(ql_dbg_async, vha, 0xd02b,
547 "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
548 qla27xx_skip_entry(ent, buf);
552 return qla27xx_next_entry(ent);
/* T269: scratch entry — emit four marker dwords plus the running length
 * (5 dwords total, recorded in scratch_size). */
555 static struct qla27xx_fwdt_entry *
556 qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
557 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
559 ql_dbg(ql_dbg_misc, vha, 0xd20d,
560 "%s: scratch [%lx]\n", __func__, *len);
561 qla27xx_insert32(0xaaaaaaaa, buf, len);
562 qla27xx_insert32(0xbbbbbbbb, buf, len);
563 qla27xx_insert32(0xcccccccc, buf, len);
564 qla27xx_insert32(0xdddddddd, buf, len);
/* Record the length this insert itself will bring the dump to. */
565 qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
567 ent->t269.scratch_size = 5 * sizeof(uint32_t);
569 return qla27xx_next_entry(ent);
/*
 * T270: read remote registers through window 0x40: write addr|0x80000000 to
 * 0xc0, then read the value back from 0xc4, repeating per dword.
 * NOTE(review): this uses IOBASE_ADDR while every other handler in this
 * file uses IOBASE(vha)/IOBAR() — confirm IOBASE_ADDR is defined and
 * equivalent, or this is an inconsistency to fix.
 */
572 static struct qla27xx_fwdt_entry *
573 qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
574 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
576 ulong addr = le32_to_cpu(ent->t270.addr);
577 ulong dwords = le32_to_cpu(ent->t270.count);
579 ql_dbg(ql_dbg_misc, vha, 0xd20e,
580 "%s: rdremreg [%lx]\n", __func__, *len);
581 qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf);
583 qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf);
584 qla27xx_insert32(addr, buf, len);
585 qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
586 addr += sizeof(uint32_t);
589 return qla27xx_next_entry(ent);
/* T271: write a remote register (data to 0xc4, address to 0xc0). */
592 static struct qla27xx_fwdt_entry *
593 qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
594 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
596 ulong addr = le32_to_cpu(ent->t271.addr);
597 ulong data = le32_to_cpu(ent->t271.data);
599 ql_dbg(ql_dbg_misc, vha, 0xd20f,
600 "%s: wrremreg [%lx]\n", __func__, *len);
601 qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
602 qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
603 qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);
605 return qla27xx_next_entry(ent);
/* T272: dump MPI RAM via the mailbox dump helper. */
608 static struct qla27xx_fwdt_entry *
609 qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
610 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
612 ulong dwords = le32_to_cpu(ent->t272.count);
613 ulong start = le32_to_cpu(ent->t272.addr);
615 ql_dbg(ql_dbg_misc, vha, 0xd210,
616 "%s: rdremram [%lx]\n", __func__, *len);
618 ql_dbg(ql_dbg_misc, vha, 0xd02c,
619 "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
621 qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
623 *len += dwords * sizeof(uint32_t);
625 return qla27xx_next_entry(ent);
/* T273: dump PCI config space — (addr, value) pairs per dword; a failed
 * config read is logged but the (stale) value is still inserted. */
628 static struct qla27xx_fwdt_entry *
629 qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
630 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
632 ulong dwords = le32_to_cpu(ent->t273.count);
633 ulong addr = le32_to_cpu(ent->t273.addr);
636 ql_dbg(ql_dbg_misc, vha, 0xd211,
637 "%s: pcicfg [%lx]\n", __func__, *len);
640 if (pci_read_config_dword(vha->hw->pdev, addr, &value))
641 ql_dbg(ql_dbg_misc, vha, 0xd02d,
642 "%s: failed pcicfg read at %lx\n", __func__, addr);
643 qla27xx_insert32(addr, buf, len);
644 qla27xx_insert32(value, buf, len);
645 addr += sizeof(uint32_t);
648 return qla27xx_next_entry(ent);
/*
 * T274: capture shadow (in-memory) queue pointers — one (index, 1, value)
 * record per queue; missing pointers are recorded as 0.  The number of
 * captured queues is written back into the entry.
 */
651 static struct qla27xx_fwdt_entry *
652 qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
653 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
655 ulong type = ent->t274.queue_type;
659 ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
660 "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
661 if (type == T274_QUEUE_TYPE_REQ_SHAD) {
662 for (i = 0; i < vha->hw->max_req_queues; i++) {
663 struct req_que *req = vha->hw->req_q_map[i];
666 qla27xx_insert16(i, buf, len);
667 qla27xx_insert16(1, buf, len);
668 qla27xx_insert32(req && req->out_ptr ?
669 *req->out_ptr : 0, buf, len);
673 } else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
674 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
675 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
678 qla27xx_insert16(i, buf, len);
679 qla27xx_insert16(1, buf, len);
680 qla27xx_insert32(rsp && rsp->in_ptr ?
681 *rsp->in_ptr : 0, buf, len);
/* ATIO shadow only exists in target mode; its in-pointer is MMIO (readl). */
685 } else if (QLA_TGT_MODE_ENABLED() &&
686 ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
687 struct qla_hw_data *ha = vha->hw;
688 struct atio *atr = ha->tgt.atio_ring_ptr;
691 qla27xx_insert16(0, buf, len);
692 qla27xx_insert16(1, buf, len);
693 qla27xx_insert32(ha->tgt.atio_q_in ?
694 readl(ha->tgt.atio_q_in) : 0, buf, len);
698 ql_dbg(ql_dbg_misc, vha, 0xd02f,
699 "%s: unknown queue %lx\n", __func__, type);
700 qla27xx_skip_entry(ent, buf);
705 ent->t274.num_queues = count;
707 qla27xx_skip_entry(ent, buf);
710 return qla27xx_next_entry(ent);
/* T275: copy a buffer embedded in the template entry into the dump,
 * truncating if the declared length would run past the entry's size. */
713 static struct qla27xx_fwdt_entry *
714 qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
715 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
717 ulong offset = offsetof(typeof(*ent), t275.buffer);
718 ulong length = le32_to_cpu(ent->t275.length);
719 ulong size = le32_to_cpu(ent->hdr.size);
720 void *buffer = ent->t275.buffer;
722 ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
723 "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
725 ql_dbg(ql_dbg_misc, vha, 0xd020,
726 "%s: buffer zero length\n", __func__);
727 qla27xx_skip_entry(ent, buf);
/* Clamp length to the entry payload and record the truncated size. */
730 if (offset + length > size) {
731 length = size - offset;
732 ql_dbg(ql_dbg_misc, vha, 0xd030,
733 "%s: buffer overflow, truncate [%lx]\n", __func__, length);
734 ent->t275.length = cpu_to_le32(length);
737 qla27xx_insertbuf(buffer, length, buf, len);
739 return qla27xx_next_entry(ent);
/*
 * T276: conditional — compare the device type (from the PCI device id) and
 * port function against cond1/cond2; on mismatch the FOLLOWING entry is
 * marked skipped.
 */
742 static struct qla27xx_fwdt_entry *
743 qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
744 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
746 ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
747 "%s: cond [%lx]\n", __func__, *len);
750 ulong cond1 = le32_to_cpu(ent->t276.cond1);
751 ulong cond2 = le32_to_cpu(ent->t276.cond2);
/* Device type nibble from the PCI device id; function number low 2 bits. */
752 uint type = vha->hw->pdev->device >> 4 & 0xf;
753 uint func = vha->hw->port_no & 0x3;
755 if (type != cond1 || func != cond2) {
756 struct qla27xx_fwdt_template *tmp = buf;
759 ent = qla27xx_next_entry(ent);
760 qla27xx_skip_entry(ent, buf);
764 return qla27xx_next_entry(ent);
/* T277: PEP register read — issue a command write, then dump the data reg. */
767 static struct qla27xx_fwdt_entry *
768 qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
769 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
771 ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
772 ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
773 ulong data_addr = le32_to_cpu(ent->t277.data_addr);
775 ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
776 "%s: rdpep [%lx]\n", __func__, *len);
777 qla27xx_insert32(wr_cmd_data, buf, len);
778 qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
779 qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);
781 return qla27xx_next_entry(ent);
/* T278: PEP register write — data first, then the command trigger. */
784 static struct qla27xx_fwdt_entry *
785 qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
786 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
788 ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
789 ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
790 ulong data_addr = le32_to_cpu(ent->t278.data_addr);
791 ulong wr_data = le32_to_cpu(ent->t278.wr_data);
793 ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
794 "%s: wrpep [%lx]\n", __func__, *len);
795 qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
796 qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
798 return qla27xx_next_entry(ent);
/* Fallback handler for unrecognized entry types: log, mark skipped, continue. */
801 static struct qla27xx_fwdt_entry *
802 qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
803 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
805 ulong type = le32_to_cpu(ent->hdr.type);
807 ql_dbg(ql_dbg_misc, vha, 0xd2ff,
808 "%s: other %lx [%lx]\n", __func__, type, *len);
809 qla27xx_skip_entry(ent, buf);
811 return qla27xx_next_entry(ent);
/*
 * Entry-type -> handler dispatch table.  MUST stay sorted ascending by type:
 * qla27xx_find_entry() below does a linear scan with "list->type < type".
 * The -1 (all-ones) terminator catches everything else.
 */
816 typeof(qla27xx_fwdt_entry_other)(*call);
817 } qla27xx_fwdt_entry_call[] = {
818 { ENTRY_TYPE_NOP, qla27xx_fwdt_entry_t0 },
819 { ENTRY_TYPE_TMP_END, qla27xx_fwdt_entry_t255 },
820 { ENTRY_TYPE_RD_IOB_T1, qla27xx_fwdt_entry_t256 },
821 { ENTRY_TYPE_WR_IOB_T1, qla27xx_fwdt_entry_t257 },
822 { ENTRY_TYPE_RD_IOB_T2, qla27xx_fwdt_entry_t258 },
823 { ENTRY_TYPE_WR_IOB_T2, qla27xx_fwdt_entry_t259 },
824 { ENTRY_TYPE_RD_PCI, qla27xx_fwdt_entry_t260 },
825 { ENTRY_TYPE_WR_PCI, qla27xx_fwdt_entry_t261 },
826 { ENTRY_TYPE_RD_RAM, qla27xx_fwdt_entry_t262 },
827 { ENTRY_TYPE_GET_QUEUE, qla27xx_fwdt_entry_t263 },
828 { ENTRY_TYPE_GET_FCE, qla27xx_fwdt_entry_t264 },
829 { ENTRY_TYPE_PSE_RISC, qla27xx_fwdt_entry_t265 },
830 { ENTRY_TYPE_RST_RISC, qla27xx_fwdt_entry_t266 },
831 { ENTRY_TYPE_DIS_INTR, qla27xx_fwdt_entry_t267 },
832 { ENTRY_TYPE_GET_HBUF, qla27xx_fwdt_entry_t268 },
833 { ENTRY_TYPE_SCRATCH, qla27xx_fwdt_entry_t269 },
834 { ENTRY_TYPE_RDREMREG, qla27xx_fwdt_entry_t270 },
835 { ENTRY_TYPE_WRREMREG, qla27xx_fwdt_entry_t271 },
836 { ENTRY_TYPE_RDREMRAM, qla27xx_fwdt_entry_t272 },
837 { ENTRY_TYPE_PCICFG, qla27xx_fwdt_entry_t273 },
838 { ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274 },
839 { ENTRY_TYPE_WRITE_BUF, qla27xx_fwdt_entry_t275 },
840 { ENTRY_TYPE_CONDITIONAL, qla27xx_fwdt_entry_t276 },
841 { ENTRY_TYPE_RDPEPREG, qla27xx_fwdt_entry_t277 },
842 { ENTRY_TYPE_WRPEPREG, qla27xx_fwdt_entry_t278 },
843 { -1, qla27xx_fwdt_entry_other }
/* Look up the handler for an entry type; falls back to _other. */
847 typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
849 typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;
851 while (list->type < type)
854 if (list->type == type)
856 return qla27xx_fwdt_entry_other;
/*
 * Walk every entry of a firmware-dump template, dispatching each to its
 * handler.  tmp->count is temporarily used as a CPU-endian countdown and
 * endianized back into the template as the residual count at the end.
 */
860 qla27xx_walk_template(struct scsi_qla_host *vha,
861 struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
863 struct qla27xx_fwdt_entry *ent = (void *)tmp +
864 le32_to_cpu(tmp->entry_offset);
867 tmp->count = le32_to_cpu(tmp->entry_count);
868 ql_dbg(ql_dbg_misc, vha, 0xd01a,
869 "%s: entry count %u\n", __func__, tmp->count);
870 while (ent && tmp->count--) {
871 type = le32_to_cpu(ent->hdr.type);
872 ent = qla27xx_find_entry(type)(vha, ent, buf, len);
/* A handler (t262) returns INVALID_ENTRY to abort the whole capture. */
876 if (ent == INVALID_ENTRY) {
878 ql_dbg(ql_dbg_async, vha, 0xffff,
/* NOTE(review): this log string lacks a trailing "\n" unlike every other
 * message in the file — likely an oversight worth fixing separately. */
879 "Unable to capture FW dump");
885 ql_dbg(ql_dbg_misc, vha, 0xd018,
886 "%s: entry count residual=+%u\n", __func__, tmp->count);
889 ql_dbg(ql_dbg_misc, vha, 0xd019,
890 "%s: missing end entry\n", __func__);
893 cpu_to_le32s(&tmp->count); /* endianize residual count */
/* Stamp the template with the current jiffies value. */
897 qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
899 tmp->capture_timestamp = cpu_to_le32(jiffies)
903 qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
/* Parse the six dot-separated components of the driver version string. */
905 uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
907 WARN_ON_ONCE(sscanf(qla2x00_version_str,
908 "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
909 v+0, v+1, v+2, v+3, v+4, v+5) != 6);
911 tmp->driver_info[0] = cpu_to_le32(
912 v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]);
913 tmp->driver_info[1] = cpu_to_le32(v[5] << 8 | v[4]);
/* NOTE(review): __constant_cpu_to_le32 is the deprecated spelling of
 * cpu_to_le32 for constants — candidate for modernization. */
914 tmp->driver_info[2] = __constant_cpu_to_le32(0x12345678);
/* Record firmware version and attribute words into the template. */
918 qla27xx_firmware_info(struct scsi_qla_host *vha,
919 struct qla27xx_fwdt_template *tmp)
921 tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version);
922 tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version);
923 tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version);
924 tmp->firmware_version[3] = cpu_to_le32(
925 vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
926 tmp->firmware_version[4] = cpu_to_le32(
927 vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]);
/* Fill in all capture-time metadata before walking the template. */
931 ql27xx_edit_template(struct scsi_qla_host *vha,
932 struct qla27xx_fwdt_template *tmp)
934 qla27xx_time_stamp(tmp);
935 qla27xx_driver_info(tmp);
936 qla27xx_firmware_info(vha, tmp);
/* Sum the template as little-endian dwords into a 64-bit accumulator and
 * fold the carries; presumably returns the complement so a valid template
 * checksums to 0 — the return line is not visible in this extract. */
939 static inline uint32_t
940 qla27xx_template_checksum(void *p, ulong size)
945 size /= sizeof(*buf);
947 for ( ; size--; buf++)
948 sum += le32_to_cpu(*buf);
/* Fold the high 32 bits of carries back into the low word. */
950 sum = (sum & 0xffffffff) + (sum >> 32);
/* A valid template checksums to zero. */
956 qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
958 return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
968 qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
969 struct qla27xx_fwdt_template *tmp, void *buf)
973 if (qla27xx_fwdt_template_valid(tmp)) {
974 len = tmp->template_size;
975 tmp = memcpy(buf, tmp, len);
976 ql27xx_edit_template(vha, tmp);
977 qla27xx_walk_template(vha, tmp, buf, &len);
984 qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
986 struct qla27xx_fwdt_template *tmp = p;
989 if (qla27xx_fwdt_template_valid(tmp)) {
990 len = tmp->template_size;
991 qla27xx_walk_template(vha, tmp, NULL, &len);
998 qla27xx_fwdt_template_size(void *p)
1000 struct qla27xx_fwdt_template *tmp = p;
1002 return tmp->template_size;
1006 qla27xx_fwdt_template_valid(void *p)
1008 struct qla27xx_fwdt_template *tmp = p;
1010 if (!qla27xx_verify_template_header(tmp)) {
1011 ql_log(ql_log_warn, NULL, 0xd01c,
1012 "%s: template type %x\n", __func__,
1013 le32_to_cpu(tmp->template_type));
1017 if (!qla27xx_verify_template_checksum(tmp)) {
1018 ql_log(ql_log_warn, NULL, 0xd01d,
1019 "%s: failed template checksum\n", __func__);
/*
 * Capture an MPI firmware dump into hw->mpi_fw_dump using template fwdt[1].
 * Takes hardware_lock unless the caller already holds it; resets the MPI
 * afterwards unless the capture path cleared need_mpi_reset.
 */
1027 qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
1030 bool need_mpi_reset = true;
1033 if (!hardware_locked)
1034 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
1036 if (!vha->hw->mpi_fw_dump) {
1037 ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
/* One-shot: never overwrite a dump that has not been retrieved yet. */
1038 } else if (vha->hw->mpi_fw_dumped) {
1039 ql_log(ql_log_warn, vha, 0x02f4,
1040 "-> MPI firmware already dumped (%p) -- ignoring request\n",
1041 vha->hw->mpi_fw_dump);
1043 struct fwdt *fwdt = &vha->hw->fwdt[1];
1045 void *buf = vha->hw->mpi_fw_dump;
1047 ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
1048 if (!fwdt->template) {
1049 ql_log(ql_log_warn, vha, 0x02f6,
1050 "-> fwdt1 no template\n");
1053 len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
/* A size mismatch is logged but the dump is still kept. */
1056 } else if (len != fwdt->dump_size) {
1057 ql_log(ql_log_warn, vha, 0x02f7,
1058 "-> fwdt1 fwdump residual=%+ld\n",
1059 fwdt->dump_size - len);
1061 need_mpi_reset = false;
1064 vha->hw->mpi_fw_dump_len = len;
1065 vha->hw->mpi_fw_dumped = 1;
1067 ql_log(ql_log_warn, vha, 0x02f8,
1068 "-> MPI firmware dump saved to buffer (%lu/%p)\n",
1069 vha->host_no, vha->hw->mpi_fw_dump);
/* Notify userspace that a firmware dump is available. */
1070 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
1075 qla27xx_reset_mpi(vha);
1077 if (!hardware_locked)
1078 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
1083 qla27xx_fwdump(scsi_qla_host_t *vha)
1085 lockdep_assert_held(&vha->hw->hardware_lock);
1087 if (!vha->hw->fw_dump) {
1088 ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
1089 } else if (vha->hw->fw_dumped) {
1090 ql_log(ql_log_warn, vha, 0xd01f,
1091 "-> Firmware already dumped (%p) -- ignoring request\n",
1094 struct fwdt *fwdt = vha->hw->fwdt;
1096 void *buf = vha->hw->fw_dump;
1098 ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n");
1099 if (!fwdt->template) {
1100 ql_log(ql_log_warn, vha, 0xd012,
1101 "-> fwdt0 no template\n");
1104 len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
1107 } else if (len != fwdt->dump_size) {
1108 ql_log(ql_log_warn, vha, 0xd013,
1109 "-> fwdt0 fwdump residual=%+ld\n",
1110 fwdt->dump_size - len);
1113 vha->hw->fw_dump_len = len;
1114 vha->hw->fw_dumped = true;
1116 ql_log(ql_log_warn, vha, 0xd015,
1117 "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
1118 vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
1119 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);