/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/* note: the default template below is stored in big-endian byte order */
11 static const uint32_t ql27xx_fwdt_default_template[] = {
12 0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
13 0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
14 0x00000000, 0x00000000, 0x00000000, 0x00000000,
15 0x00000000, 0x00000000, 0x00000000, 0x00000000,
16 0x00000000, 0x00000000, 0x00000000, 0x00000000,
17 0x00000000, 0x00000000, 0x00000000, 0x00000000,
18 0x00000000, 0x00000000, 0x00000000, 0x00000000,
19 0x00000000, 0x00000000, 0x00000000, 0x00000000,
20 0x00000000, 0x00000000, 0x00000000, 0x00000000,
21 0x00000000, 0x00000000, 0x00000000, 0x00000000,
22 0x00000000, 0x04010000, 0x14000000, 0x00000000,
23 0x02000000, 0x44000000, 0x09010000, 0x10000000,
24 0x00000000, 0x02000000, 0x01010000, 0x1c000000,
25 0x00000000, 0x02000000, 0x00600000, 0x00000000,
26 0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
27 0x02000000, 0x00600000, 0x00000000, 0xcc000000,
28 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
29 0x10600000, 0x00000000, 0xd4000000, 0x01010000,
30 0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
31 0x00000060, 0xf0000000, 0x00010000, 0x18000000,
32 0x00000000, 0x02000000, 0x00700000, 0x041000c0,
33 0x00010000, 0x18000000, 0x00000000, 0x02000000,
34 0x10700000, 0x041000c0, 0x00010000, 0x18000000,
35 0x00000000, 0x02000000, 0x40700000, 0x041000c0,
36 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
37 0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
38 0x18000000, 0x00000000, 0x02000000, 0x007c0000,
39 0x040300c4, 0x00010000, 0x18000000, 0x00000000,
40 0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
41 0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
42 0x00000000, 0xc0000000, 0x00010000, 0x18000000,
43 0x00000000, 0x02000000, 0x007c0000, 0x04200000,
44 0x0b010000, 0x18000000, 0x00000000, 0x02000000,
45 0x0c000000, 0x00000000, 0x02010000, 0x20000000,
46 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
47 0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
48 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
49 0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
50 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
51 0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
52 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
53 0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
54 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
55 0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
56 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
57 0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
58 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
59 0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
60 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
61 0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
62 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
63 0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
64 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
65 0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
66 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
67 0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
68 0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
69 0x00010000, 0x18000000, 0x00000000, 0x02000000,
70 0x0a000000, 0x04200080, 0x00010000, 0x18000000,
71 0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
72 0x00010000, 0x18000000, 0x00000000, 0x02000000,
73 0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
74 0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
75 0x00010000, 0x18000000, 0x00000000, 0x02000000,
76 0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
77 0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
78 0x00010000, 0x18000000, 0x00000000, 0x02000000,
79 0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
80 0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
81 0x00010000, 0x18000000, 0x00000000, 0x02000000,
82 0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
83 0x00000000, 0x02000000, 0x00300000, 0x041000c0,
84 0x00010000, 0x18000000, 0x00000000, 0x02000000,
85 0x10300000, 0x041000c0, 0x00010000, 0x18000000,
86 0x00000000, 0x02000000, 0x20300000, 0x041000c0,
87 0x00010000, 0x18000000, 0x00000000, 0x02000000,
88 0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
89 0x00000000, 0x02000000, 0x06010000, 0x1c000000,
90 0x00000000, 0x02000000, 0x01000000, 0x00000200,
91 0xff230200, 0x06010000, 0x1c000000, 0x00000000,
92 0x02000000, 0x02000000, 0x00001000, 0x00000000,
93 0x07010000, 0x18000000, 0x00000000, 0x02000000,
94 0x00000000, 0x01000000, 0x07010000, 0x18000000,
95 0x00000000, 0x02000000, 0x00000000, 0x02000000,
96 0x07010000, 0x18000000, 0x00000000, 0x02000000,
97 0x00000000, 0x03000000, 0x0d010000, 0x14000000,
98 0x00000000, 0x02000000, 0x00000000, 0xff000000,
99 0x10000000, 0x00000000, 0x00000080,
102 static inline void __iomem *
103 qla27xx_isp_reg(struct scsi_qla_host *vha)
105 return &vha->hw->iobase->isp24;
109 qla27xx_insert16(uint16_t value, void *buf, ulong *len)
113 *(__le16 *)buf = cpu_to_le16(value);
115 *len += sizeof(value);
119 qla27xx_insert32(uint32_t value, void *buf, ulong *len)
123 *(__le32 *)buf = cpu_to_le32(value);
125 *len += sizeof(value);
129 qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
132 if (buf && mem && size) {
134 memcpy(buf, mem, size);
140 qla27xx_read8(void *window, void *buf, ulong *len)
145 value = RD_REG_BYTE((__iomem void *)window);
147 qla27xx_insert32(value, buf, len);
151 qla27xx_read16(void *window, void *buf, ulong *len)
156 value = RD_REG_WORD((__iomem void *)window);
158 qla27xx_insert32(value, buf, len);
162 qla27xx_read32(void *window, void *buf, ulong *len)
167 value = RD_REG_DWORD((__iomem void *)window);
169 qla27xx_insert32(value, buf, len);
172 static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
175 (width == 1) ? qla27xx_read8 :
176 (width == 2) ? qla27xx_read16 :
181 qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
182 uint offset, void *buf, ulong *len)
184 void *window = (void *)reg + offset;
186 qla27xx_read32(window, buf, len);
190 qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
191 uint offset, uint32_t data, void *buf)
193 __iomem void *window = reg + offset;
196 WRT_REG_DWORD(window, data);
201 qla27xx_read_window(__iomem struct device_reg_24xx *reg,
202 uint32_t addr, uint offset, uint count, uint width, void *buf,
205 void *window = (void *)reg + offset;
206 void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
208 qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
210 qla27xx_insert32(addr, buf, len);
211 readn(window, buf, len);
218 qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
221 ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
225 qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
226 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
228 ql_dbg(ql_dbg_misc, vha, 0xd100,
229 "%s: nop [%lx]\n", __func__, *len);
230 qla27xx_skip_entry(ent, buf);
236 qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
237 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
239 ql_dbg(ql_dbg_misc, vha, 0xd1ff,
240 "%s: end [%lx]\n", __func__, *len);
241 qla27xx_skip_entry(ent, buf);
248 qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
249 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
251 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
253 ql_dbg(ql_dbg_misc, vha, 0xd200,
254 "%s: rdio t1 [%lx]\n", __func__, *len);
255 qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
256 ent->t256.reg_count, ent->t256.reg_width, buf, len);
262 qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
263 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
265 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
267 ql_dbg(ql_dbg_misc, vha, 0xd201,
268 "%s: wrio t1 [%lx]\n", __func__, *len);
269 qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
270 qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
276 qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
277 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
279 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
281 ql_dbg(ql_dbg_misc, vha, 0xd202,
282 "%s: rdio t2 [%lx]\n", __func__, *len);
283 qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
284 qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
285 ent->t258.reg_count, ent->t258.reg_width, buf, len);
291 qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
292 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
294 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
296 ql_dbg(ql_dbg_misc, vha, 0xd203,
297 "%s: wrio t2 [%lx]\n", __func__, *len);
298 qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
299 qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
300 qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
306 qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
307 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
309 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
311 ql_dbg(ql_dbg_misc, vha, 0xd204,
312 "%s: rdpci [%lx]\n", __func__, *len);
313 qla27xx_insert32(ent->t260.pci_offset, buf, len);
314 qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
320 qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
321 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
323 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
325 ql_dbg(ql_dbg_misc, vha, 0xd205,
326 "%s: wrpci [%lx]\n", __func__, *len);
327 qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
333 qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
334 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
340 ql_dbg(ql_dbg_misc, vha, 0xd206,
341 "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
342 start = ent->t262.start_addr;
343 end = ent->t262.end_addr;
345 if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
347 } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
348 end = vha->hw->fw_memory_size;
350 ent->t262.end_addr = end;
351 } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
352 start = vha->hw->fw_shared_ram_start;
353 end = vha->hw->fw_shared_ram_end;
355 ent->t262.start_addr = start;
356 ent->t262.end_addr = end;
358 } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
359 ql_dbg(ql_dbg_misc, vha, 0xd021,
360 "%s: unsupported ddr ram\n", __func__);
361 qla27xx_skip_entry(ent, buf);
364 ql_dbg(ql_dbg_misc, vha, 0xd022,
365 "%s: unknown area %u\n", __func__, ent->t262.ram_area);
366 qla27xx_skip_entry(ent, buf);
370 if (end < start || end == 0) {
371 ql_dbg(ql_dbg_misc, vha, 0xd023,
372 "%s: unusable range (start=%x end=%x)\n", __func__,
373 ent->t262.end_addr, ent->t262.start_addr);
374 qla27xx_skip_entry(ent, buf);
378 dwords = end - start + 1;
380 ql_dbg(ql_dbg_misc, vha, 0xd024,
381 "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
383 qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
385 *len += dwords * sizeof(uint32_t);
391 qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
392 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
398 ql_dbg(ql_dbg_misc, vha, 0xd207,
399 "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
400 if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
401 for (i = 0; i < vha->hw->max_req_queues; i++) {
402 struct req_que *req = vha->hw->req_q_map[i];
405 req->length : REQUEST_ENTRY_CNT_24XX;
406 qla27xx_insert16(i, buf, len);
407 qla27xx_insert16(length, buf, len);
408 qla27xx_insertbuf(req ? req->ring : NULL,
409 length * sizeof(*req->ring), buf, len);
413 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
414 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
415 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
418 rsp->length : RESPONSE_ENTRY_CNT_MQ;
419 qla27xx_insert16(i, buf, len);
420 qla27xx_insert16(length, buf, len);
421 qla27xx_insertbuf(rsp ? rsp->ring : NULL,
422 length * sizeof(*rsp->ring), buf, len);
426 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
427 ql_dbg(ql_dbg_misc, vha, 0xd025,
428 "%s: unsupported atio queue\n", __func__);
429 qla27xx_skip_entry(ent, buf);
431 ql_dbg(ql_dbg_misc, vha, 0xd026,
432 "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
433 qla27xx_skip_entry(ent, buf);
437 ent->t263.num_queues = count;
443 qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
444 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
446 ql_dbg(ql_dbg_misc, vha, 0xd208,
447 "%s: getfce [%lx]\n", __func__, *len);
450 ent->t264.fce_trace_size = FCE_SIZE;
451 ent->t264.write_pointer = vha->hw->fce_wr;
452 ent->t264.base_pointer = vha->hw->fce_dma;
453 ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
454 ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
455 ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
456 ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
457 ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
458 ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
460 qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
462 ql_dbg(ql_dbg_misc, vha, 0xd027,
463 "%s: missing fce\n", __func__);
464 qla27xx_skip_entry(ent, buf);
471 qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
472 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
474 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
476 ql_dbg(ql_dbg_misc, vha, 0xd209,
477 "%s: pause risc [%lx]\n", __func__, *len);
479 qla24xx_pause_risc(reg, vha->hw);
485 qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
486 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
488 ql_dbg(ql_dbg_misc, vha, 0xd20a,
489 "%s: reset risc [%lx]\n", __func__, *len);
491 qla24xx_soft_reset(vha->hw);
497 qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
498 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
500 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
502 ql_dbg(ql_dbg_misc, vha, 0xd20b,
503 "%s: dis intr [%lx]\n", __func__, *len);
504 qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
510 qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
511 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
513 ql_dbg(ql_dbg_misc, vha, 0xd20c,
514 "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
515 if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
518 ent->t268.buf_size = EFT_SIZE;
519 ent->t268.start_addr = vha->hw->eft_dma;
521 qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
523 ql_dbg(ql_dbg_misc, vha, 0xd028,
524 "%s: missing eft\n", __func__);
525 qla27xx_skip_entry(ent, buf);
527 } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
528 ql_dbg(ql_dbg_misc, vha, 0xd029,
529 "%s: unsupported exchange offload buffer\n", __func__);
530 qla27xx_skip_entry(ent, buf);
531 } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
532 ql_dbg(ql_dbg_misc, vha, 0xd02a,
533 "%s: unsupported extended login buffer\n", __func__);
534 qla27xx_skip_entry(ent, buf);
536 ql_dbg(ql_dbg_misc, vha, 0xd02b,
537 "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
538 qla27xx_skip_entry(ent, buf);
545 qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
546 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
548 ql_dbg(ql_dbg_misc, vha, 0xd20d,
549 "%s: scratch [%lx]\n", __func__, *len);
550 qla27xx_insert32(0xaaaaaaaa, buf, len);
551 qla27xx_insert32(0xbbbbbbbb, buf, len);
552 qla27xx_insert32(0xcccccccc, buf, len);
553 qla27xx_insert32(0xdddddddd, buf, len);
554 qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
556 ent->t269.scratch_size = 5 * sizeof(uint32_t);
562 qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
563 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
565 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
566 ulong dwords = ent->t270.count;
567 ulong addr = ent->t270.addr;
569 ql_dbg(ql_dbg_misc, vha, 0xd20e,
570 "%s: rdremreg [%lx]\n", __func__, *len);
571 qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
573 qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
574 qla27xx_insert32(addr, buf, len);
575 qla27xx_read_reg(reg, 0xc4, buf, len);
576 addr += sizeof(uint32_t);
583 qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
584 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
586 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
587 ulong addr = ent->t271.addr;
588 ulong data = ent->t271.data;
590 ql_dbg(ql_dbg_misc, vha, 0xd20f,
591 "%s: wrremreg [%lx]\n", __func__, *len);
592 qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
593 qla27xx_write_reg(reg, 0xc4, data, buf);
594 qla27xx_write_reg(reg, 0xc0, addr, buf);
600 qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
601 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
603 ulong dwords = ent->t272.count;
604 ulong start = ent->t272.addr;
606 ql_dbg(ql_dbg_misc, vha, 0xd210,
607 "%s: rdremram [%lx]\n", __func__, *len);
609 ql_dbg(ql_dbg_misc, vha, 0xd02c,
610 "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
612 qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
614 *len += dwords * sizeof(uint32_t);
620 qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
621 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
623 ulong dwords = ent->t273.count;
624 ulong addr = ent->t273.addr;
627 ql_dbg(ql_dbg_misc, vha, 0xd211,
628 "%s: pcicfg [%lx]\n", __func__, *len);
631 if (pci_read_config_dword(vha->hw->pdev, addr, &value))
632 ql_dbg(ql_dbg_misc, vha, 0xd02d,
633 "%s: failed pcicfg read at %lx\n", __func__, addr);
634 qla27xx_insert32(addr, buf, len);
635 qla27xx_insert32(value, buf, len);
636 addr += sizeof(uint32_t);
643 qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
644 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
649 ql_dbg(ql_dbg_misc, vha, 0xd212,
650 "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
651 if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
652 for (i = 0; i < vha->hw->max_req_queues; i++) {
653 struct req_que *req = vha->hw->req_q_map[i];
655 qla27xx_insert16(i, buf, len);
656 qla27xx_insert16(1, buf, len);
657 qla27xx_insert32(req && req->out_ptr ?
658 *req->out_ptr : 0, buf, len);
662 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
663 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
664 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
666 qla27xx_insert16(i, buf, len);
667 qla27xx_insert16(1, buf, len);
668 qla27xx_insert32(rsp && rsp->in_ptr ?
669 *rsp->in_ptr : 0, buf, len);
673 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
674 ql_dbg(ql_dbg_misc, vha, 0xd02e,
675 "%s: unsupported atio queue\n", __func__);
676 qla27xx_skip_entry(ent, buf);
678 ql_dbg(ql_dbg_misc, vha, 0xd02f,
679 "%s: unknown queue %u\n", __func__, ent->t274.queue_type);
680 qla27xx_skip_entry(ent, buf);
684 ent->t274.num_queues = count;
687 qla27xx_skip_entry(ent, buf);
693 qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
694 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
696 ulong offset = offsetof(typeof(*ent), t275.buffer);
698 ql_dbg(ql_dbg_misc, vha, 0xd213,
699 "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
700 if (!ent->t275.length) {
701 ql_dbg(ql_dbg_misc, vha, 0xd020,
702 "%s: buffer zero length\n", __func__);
703 qla27xx_skip_entry(ent, buf);
706 if (offset + ent->t275.length > ent->hdr.entry_size) {
707 ql_dbg(ql_dbg_misc, vha, 0xd030,
708 "%s: buffer overflow\n", __func__);
709 qla27xx_skip_entry(ent, buf);
713 qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
719 qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
720 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
722 ql_dbg(ql_dbg_misc, vha, 0xd2ff,
723 "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
724 qla27xx_skip_entry(ent, buf);
729 struct qla27xx_fwdt_entry_call {
732 struct scsi_qla_host *,
733 struct qla27xx_fwdt_entry *,
738 static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
739 { ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } ,
740 { ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } ,
741 { ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } ,
742 { ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } ,
743 { ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } ,
744 { ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } ,
745 { ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } ,
746 { ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } ,
747 { ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } ,
748 { ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } ,
749 { ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } ,
750 { ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } ,
751 { ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } ,
752 { ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } ,
753 { ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } ,
754 { ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } ,
755 { ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } ,
756 { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
757 { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
758 { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
759 { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } ,
760 { ENTRY_TYPE_WRITE_BUF , qla27xx_fwdt_entry_t275 } ,
761 { -1 , qla27xx_fwdt_entry_other }
764 static inline int (*qla27xx_find_entry(uint type))
765 (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
767 struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
769 while (list->type < type)
772 if (list->type == type)
774 return qla27xx_fwdt_entry_other;
778 qla27xx_next_entry(void *p)
780 struct qla27xx_fwdt_entry *ent = p;
782 return p + ent->hdr.entry_size;
786 qla27xx_walk_template(struct scsi_qla_host *vha,
787 struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
789 struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
790 ulong count = tmp->entry_count;
792 ql_dbg(ql_dbg_misc, vha, 0xd01a,
793 "%s: entry count %lx\n", __func__, count);
795 if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
797 ent = qla27xx_next_entry(ent);
801 ql_dbg(ql_dbg_misc, vha, 0xd018,
802 "%s: residual count (%lx)\n", __func__, count);
804 if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
805 ql_dbg(ql_dbg_misc, vha, 0xd019,
806 "%s: missing end (%lx)\n", __func__, count);
808 ql_dbg(ql_dbg_misc, vha, 0xd01b,
809 "%s: len=%lx\n", __func__, *len);
813 qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
815 tmp->capture_timestamp = jiffies;
819 qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
821 uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
824 rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
825 v+0, v+1, v+2, v+3, v+4, v+5);
827 tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
828 tmp->driver_info[1] = v[5] << 8 | v[4];
829 tmp->driver_info[2] = 0x12345678;
833 qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
834 struct scsi_qla_host *vha)
836 tmp->firmware_version[0] = vha->hw->fw_major_version;
837 tmp->firmware_version[1] = vha->hw->fw_minor_version;
838 tmp->firmware_version[2] = vha->hw->fw_subminor_version;
839 tmp->firmware_version[3] =
840 vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
841 tmp->firmware_version[4] =
842 vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
846 ql27xx_edit_template(struct scsi_qla_host *vha,
847 struct qla27xx_fwdt_template *tmp)
849 qla27xx_time_stamp(tmp);
850 qla27xx_driver_info(tmp);
851 qla27xx_firmware_info(tmp, vha);
854 static inline uint32_t
855 qla27xx_template_checksum(void *p, ulong size)
860 size /= sizeof(*buf);
865 sum = (sum & 0xffffffff) + (sum >> 32);
871 qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
873 return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
877 qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
879 return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
883 qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
885 struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
888 if (qla27xx_fwdt_template_valid(tmp)) {
889 len = tmp->template_size;
890 tmp = memcpy(vha->hw->fw_dump, tmp, len);
891 ql27xx_edit_template(vha, tmp);
892 qla27xx_walk_template(vha, tmp, tmp, &len);
893 vha->hw->fw_dump_len = len;
894 vha->hw->fw_dumped = 1;
899 qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
901 struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
904 if (qla27xx_fwdt_template_valid(tmp)) {
905 len = tmp->template_size;
906 qla27xx_walk_template(vha, tmp, NULL, &len);
913 qla27xx_fwdt_template_size(void *p)
915 struct qla27xx_fwdt_template *tmp = p;
917 return tmp->template_size;
921 qla27xx_fwdt_template_default_size(void)
923 return sizeof(ql27xx_fwdt_default_template);
927 qla27xx_fwdt_template_default(void)
929 return ql27xx_fwdt_default_template;
933 qla27xx_fwdt_template_valid(void *p)
935 struct qla27xx_fwdt_template *tmp = p;
937 if (!qla27xx_verify_template_header(tmp)) {
938 ql_log(ql_log_warn, NULL, 0xd01c,
939 "%s: template type %x\n", __func__, tmp->template_type);
943 if (!qla27xx_verify_template_checksum(tmp)) {
944 ql_log(ql_log_warn, NULL, 0xd01d,
945 "%s: failed template checksum\n", __func__);
953 qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
957 if (!hardware_locked)
958 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
960 if (!vha->hw->fw_dump)
961 ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
962 else if (!vha->hw->fw_dump_template)
963 ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
965 qla27xx_execute_fwdt_template(vha);
967 if (!hardware_locked)
968 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);