/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2017
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
                 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
                 "log level for each debug feature area "
                 "(default 3, range 0..6)");

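/*
 * Example usage: both parameters are evaluated once at load time, e.g.
 * "modprobe zfcp dbfsize=8 dbflevel=6", or "zfcp.dbfsize=8 zfcp.dbflevel=6"
 * on the kernel command line if the driver is built in.
 */

/*
 * Length of a payload trace record holding "offset" bytes of data: the fixed
 * record header plus only the used part of the data array.
 */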
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
        return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}

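/*
 * Write a payload of arbitrary length into the "pay" trace area: the data is
 * split into chunks of at most ZFCP_DBF_PAY_MAX_REC bytes and each chunk is
 * emitted as its own trace record, correlated by fsf_req_id and counter.
 */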
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
                       u64 req_id)
{
        struct zfcp_dbf_pay *pl = &dbf->pay_buf;
        u16 offset = 0, rec_length;

        spin_lock(&dbf->pay_lock);
        memset(pl, 0, sizeof(*pl));
        pl->fsf_req_id = req_id;
        memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

        while (offset < length) {
                rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
                                 (u16) (length - offset));
                memcpy(pl->data, data + offset, rec_length);
                debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

                offset += rec_length;
                pl->counter++;
        }

        spin_unlock(&dbf->pay_lock);
}

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating which kind of FSF response has been received
 * @level: trace level to be used for event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
        struct fsf_qtcb_header *q_head = &req->qtcb->header;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        unsigned long flags;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_RES;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = req->fsf_command;
        rec->fsf_seq_no = req->seq_no;
        rec->u.res.req_issued = req->issued;
        rec->u.res.prot_status = q_pref->prot_status;
        rec->u.res.fsf_status = q_head->fsf_status;
        rec->u.res.port_handle = q_head->port_handle;
        rec->u.res.lun_handle = q_head->lun_handle;

        memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
               FSF_PROT_STATUS_QUAL_SIZE);
        memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
               FSF_STATUS_QUALIFIER_SIZE);

        if (req->fsf_command != FSF_QTCB_FCP_CMND) {
                rec->pl_len = q_head->log_length;
                zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
                                  rec->pl_len, "fsf_res", req->req_id);
        }

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_status_read_buffer *srb = req->data;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        static int const level = 2;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->hba, level)))
                return;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_USS;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = req->fsf_command;

        if (!srb)
                goto log;

        rec->u.uss.status_type = srb->status_type;
        rec->u.uss.status_subtype = srb->status_subtype;
        rec->u.uss.d_id = ntoh24(srb->d_id);
        rec->u.uss.lun = srb->fcp_lun;
        memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
               sizeof(rec->u.uss.queue_designator));

        /* status read buffer payload length */
        rec->pl_len = (!srb->length) ? 0 : srb->length -
                        offsetof(struct fsf_status_read_buffer, payload);

        if (rec->pl_len)
                zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
                                  "fsf_uss", req->req_id);
log:
        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        struct fsf_status_read_buffer *sr_buf = req->data;
        static int const level = 1;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->hba, level)))
                return;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_BIT;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = req->fsf_command;
        memcpy(&rec->u.be, &sr_buf->payload.bit_error,
               sizeof(struct fsf_bit_error_payload));

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
                          void **pl)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_pay *payload = &dbf->pay_buf;
        unsigned long flags;
        static int const level = 1;
        u16 length;

        if (unlikely(!debug_level_enabled(dbf->pay, level)))
                return;

        if (!pl)
                return;

        spin_lock_irqsave(&dbf->pay_lock, flags);
        memset(payload, 0, sizeof(*payload));

        memcpy(payload->area, "def_err", 7);
        payload->fsf_req_id = req_id;
        payload->counter = 0;
        length = min((u16)sizeof(struct qdio_buffer),
                     (u16)ZFCP_DBF_PAY_MAX_REC);

        while (payload->counter < scount && (char *)pl[payload->counter]) {
                memcpy(payload->data, (char *)pl[payload->counter], length);
                debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
                payload->counter++;
        }

        spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        static int const level = 1;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->hba, level)))
                return;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_BASIC;

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

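/*
 * Fill the fields shared by all recovery trace records: adapter status and,
 * where available, port status/WWPN/D_ID and LUN status; without a SCSI
 * device the LUN field is set to ZFCP_DBF_INVALID_LUN.
 */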
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
                                struct zfcp_adapter *adapter,
                                struct zfcp_port *port,
                                struct scsi_device *sdev)
{
        rec->adapter_status = atomic_read(&adapter->status);
        if (port) {
                rec->port_status = atomic_read(&port->status);
                rec->wwpn = port->wwpn;
                rec->d_id = port->d_id;
        }
        if (sdev) {
                rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
                rec->lun = zfcp_scsi_dev_lun(sdev);
        } else
                rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
                       struct zfcp_port *port, struct scsi_device *sdev,
                       u8 want, u8 need)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        static int const level = 1;
        struct list_head *entry;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->rec, level)))
                return;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_TRIG;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        zfcp_dbf_set_common(rec, adapter, port, sdev);

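        /* snapshot how many recovery actions are queued and already running */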
        list_for_each(entry, &adapter->erp_ready_head)
                rec->u.trig.ready++;

        list_for_each(entry, &adapter->erp_running_head)
                rec->u.trig.running++;

        rec->u.trig.want = want;
        rec->u.trig.need = need;

        debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
        struct zfcp_dbf *dbf = erp->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        unsigned long flags;

        if (!debug_level_enabled(dbf->rec, level))
                return;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_RUN;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

        rec->u.run.fsf_req_id = erp->fsf_req_id;
        rec->u.run.rec_status = erp->status;
        rec->u.run.rec_step = erp->step;
        rec->u.run.rec_action = erp->action;

        if (erp->sdev)
                rec->u.run.rec_count =
                        atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
        else if (erp->port)
                rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
        else
                rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

        debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
        zfcp_dbf_rec_run_lvl(1, tag, erp);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
                          u64 req_id)
{
        struct zfcp_dbf *dbf = wka_port->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        static int const level = 1;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->rec, level)))
                return;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_RUN;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->port_status = wka_port->status;
        rec->d_id = wka_port->d_id;
        rec->lun = ZFCP_DBF_INVALID_LUN;

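        /* a WKA port has no erp_action; mark action-specific fields invalid */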
        rec->u.run.fsf_req_id = req_id;
        rec->u.run.rec_status = ~0;
        rec->u.run.rec_step = ~0;
        rec->u.run.rec_action = ~0;
        rec->u.run.rec_count = ~0;

        debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

#define ZFCP_DBF_SAN_LEVEL 1

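/*
 * Emit one SAN trace record and, if the payload does not fit into the record
 * itself, additional "pay" records with the scatterlist content capped at
 * cap_len bytes.
 */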
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
                  char *paytag, struct scatterlist *sg, u8 id, u16 len,
                  u64 req_id, u32 d_id, u16 cap_len)
{
        struct zfcp_dbf_san *rec = &dbf->san_buf;
        u16 rec_len;
        unsigned long flags;
        struct zfcp_dbf_pay *payload = &dbf->pay_buf;
        u16 pay_sum = 0;

        spin_lock_irqsave(&dbf->san_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = id;
        rec->fsf_req_id = req_id;
        rec->d_id = d_id;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->pl_len = len; /* full length even if we cap pay below */
        if (!sg)
                goto out;
        rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
        memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
        if (len <= rec_len)
                goto out; /* skip pay record if full content in rec->payload */

        /* if (len > rec_len):
         * dump data up to cap_len ignoring small duplicate in rec->payload
         */
        spin_lock(&dbf->pay_lock);
        memset(payload, 0, sizeof(*payload));
        memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
        payload->fsf_req_id = req_id;
        payload->counter = 0;
        for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
                u16 pay_len, offset = 0;

                while (offset < sg->length && pay_sum < cap_len) {
                        pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
                                      (u16)(sg->length - offset));
                        /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
                        memcpy(payload->data, sg_virt(sg) + offset, pay_len);
                        debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
                                    zfcp_dbf_plen(pay_len));
                        payload->counter++;
                        offset += pay_len;
                        pay_sum += pay_len;
                }
        }
        spin_unlock(&dbf->pay_lock);

out:
        debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->san_lock, flags);
}

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT or ELS data
 * @d_id: destination ID
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;

        if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
                return;

        length = (u16)zfcp_qdio_real_bytes(ct_els->req);
        zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
                     length, fsf->req_id, d_id, length);
}

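/*
 * For GPN_FT responses (nameserver port list), cap the traced response length
 * after the last port entry; anything else is traced with its full length.
 * The request CT_IU is inspected field by field to detect GPN_FT reliably.
 */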
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
                                              struct zfcp_fsf_req *fsf,
                                              u16 len)
{
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
        struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
        struct scatterlist *resp_entry = ct_els->resp;
        struct fc_ct_hdr *resph;
        struct fc_gpn_ft_resp *acc;
        int max_entries, x, last = 0;

        if (!(memcmp(tag, "fsscth2", 7) == 0
              && ct_els->d_id == FC_FID_DIR_SERV
              && reqh->ct_rev == FC_CT_REV
              && reqh->ct_in_id[0] == 0
              && reqh->ct_in_id[1] == 0
              && reqh->ct_in_id[2] == 0
              && reqh->ct_fs_type == FC_FST_DIR
              && reqh->ct_fs_subtype == FC_NS_SUBTYPE
              && reqh->ct_options == 0
              && reqh->_ct_resvd1 == 0
              && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
              /* reqh->ct_mr_size can vary so do not match but read below */
              && reqh->_ct_resvd2 == 0
              && reqh->ct_reason == 0
              && reqh->ct_explan == 0
              && reqh->ct_vendor == 0
              && reqn->fn_resvd == 0
              && reqn->fn_domain_id_scope == 0
              && reqn->fn_area_id_scope == 0
              && reqn->fn_fc4_type == FC_TYPE_FCP))
                return len; /* not GPN_FT response so do not cap */

        acc = sg_virt(resp_entry);

        /* cap all but accept CT responses to at least the CT header */
        resph = (struct fc_ct_hdr *)acc;
        if ((ct_els->status) ||
            (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
                return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

        max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
                       sizeof(struct fc_gpn_ft_resp))
                + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
                     * to account for header as 1st pseudo "entry" */;

        /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
         * response, allowing us to skip special handling for it - just skip it
         */
        for (x = 1; x < max_entries && !last; x++) {
                if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
                        acc++;
                else
                        acc = sg_virt(++resp_entry);

                last = acc->fp_flags & FC_NS_FID_LAST;
        }
        len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
        return len; /* cap after last entry */
}

/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing received CT or ELS data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;

        if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
                return;

        length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
        zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
                     length, fsf->req_id, ct_els->d_id,
                     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing received ELS data
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct fsf_status_read_buffer *srb =
                (struct fsf_status_read_buffer *) fsf->data;
        u16 length;
        struct scatterlist sg;

        if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
                return;

        length = (u16)(srb->length -
                        offsetof(struct fsf_status_read_buffer, payload));
        sg_init_one(&sg, srb->payload.data, length);
        zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
                     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
 * zfcp_dbf_scsi - trace event for scsi commands
 * @tag: identifier for event
 * @level: trace level to be used for event
 * @sc: pointer to struct scsi_cmnd
 * @fsf: pointer to struct zfcp_fsf_req
 */
void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
                   struct zfcp_fsf_req *fsf)
{
        struct zfcp_adapter *adapter =
                (struct zfcp_adapter *) sc->device->host->hostdata[0];
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
        struct fcp_resp_with_ext *fcp_rsp;
        struct fcp_resp_rsp_info *fcp_rsp_info;
        unsigned long flags;

        spin_lock_irqsave(&dbf->scsi_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_SCSI_CMND;
        rec->scsi_result = sc->result;
        rec->scsi_retries = sc->retries;
        rec->scsi_allowed = sc->allowed;
        rec->scsi_id = sc->device->id;
        rec->scsi_lun = (u32)sc->device->lun;
        rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
        rec->host_scribble = (unsigned long)sc->host_scribble;

        memcpy(rec->scsi_opcode, sc->cmnd,
               min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));

        if (fsf) {
                rec->fsf_req_id = fsf->req_id;
                rec->pl_len = FCP_RESP_WITH_EXT;
                fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
                /* mandatory parts of FCP_RSP IU in this SCSI record */
                memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
                if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
                        fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
                        rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
                        rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
                }
                if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
                        rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
                }
                /* complete FCP_RSP IU in associated PAYload record
                 * but only if there are optional parts
                 */
                if (fcp_rsp->resp.fr_flags != 0)
                        zfcp_dbf_pl_write(
                                dbf, fcp_rsp,
                                /* at least one full PAY record
                                 * but not beyond hardware response field
                                 */
                                min_t(u16, max_t(u16, rec->pl_len,
                                                 ZFCP_DBF_PAY_MAX_REC),
                                      FSF_FCP_RSP_SIZE),
                                "fcp_riu", fsf->req_id);
        }

        debug_event(dbf->scsi, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

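/*
 * Register one s390 debug feature area with "size" pages per area, attach the
 * standard hex/ASCII view and apply the configured trace level.
 */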
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
        struct debug_info *d;

        d = debug_register(name, size, 1, rec_size);
        if (!d)
                return NULL;

        debug_register_view(d, &debug_hex_ascii_view);
        debug_set_level(d, dbflevel);

        return d;
}

static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
        if (!dbf)
                return;

        debug_unregister(dbf->scsi);
        debug_unregister(dbf->san);
        debug_unregister(dbf->hba);
        debug_unregister(dbf->pay);
        debug_unregister(dbf->rec);
        kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
        char name[DEBUG_MAX_NAME_LEN];
        struct zfcp_dbf *dbf;

        dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
        if (!dbf)
                return -ENOMEM;

        spin_lock_init(&dbf->pay_lock);
        spin_lock_init(&dbf->hba_lock);
        spin_lock_init(&dbf->san_lock);
        spin_lock_init(&dbf->scsi_lock);
        spin_lock_init(&dbf->rec_lock);

        /* debug feature area which records recovery activity */
        sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
        dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
        if (!dbf->rec)
                goto err_out;

        /* debug feature area which records HBA (FSF and QDIO) conditions */
        sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
        dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
        if (!dbf->hba)
                goto err_out;

        /* debug feature area which records payload info */
        sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
        dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
        if (!dbf->pay)
                goto err_out;

        /* debug feature area which records SAN command failures and recovery */
        sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
        dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
        if (!dbf->san)
                goto err_out;

        /* debug feature area which records SCSI command failures and recovery */
        sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
        dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
        if (!dbf->scsi)
                goto err_out;

        adapter->dbf = dbf;

        return 0;
err_out:
        zfcp_dbf_unregister(dbf);
        return -ENOMEM;
}

/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
        struct zfcp_dbf *dbf = adapter->dbf;

        adapter->dbf = NULL;
        zfcp_dbf_unregister(dbf);
}