drivers/s390/block/dasd_eckd.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *                  Horst Hummel <Horst.Hummel@de.ibm.com>
5  *                  Carsten Otte <Cotte@de.ibm.com>
6  *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12
13 #define KMSG_COMPONENT "dasd-eckd"
14
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>        /* HDIO_GETGEO                      */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/compat.h>
22 #include <linux/init.h>
23 #include <linux/seq_file.h>
24
25 #include <asm/css_chars.h>
26 #include <asm/debug.h>
27 #include <asm/idals.h>
28 #include <asm/ebcdic.h>
29 #include <asm/io.h>
30 #include <linux/uaccess.h>
31 #include <asm/cio.h>
32 #include <asm/ccwdev.h>
33 #include <asm/itcw.h>
34 #include <asm/schid.h>
35 #include <asm/chpid.h>
36
37 #include "dasd_int.h"
38 #include "dasd_eckd.h"
39
40 #ifdef PRINTK_HEADER
41 #undef PRINTK_HEADER
42 #endif                          /* PRINTK_HEADER */
43 #define PRINTK_HEADER "dasd(eckd):"
44
45 /*
46  * raw track access always maps to 64k in memory
47  * so it maps to 16 blocks of 4k per track
48  */
49 #define DASD_RAW_BLOCK_PER_TRACK 16
50 #define DASD_RAW_BLOCKSIZE 4096
51 /* 64k is 128 x 512-byte sectors */
52 #define DASD_RAW_SECTORS_PER_TRACK 128
53
54 MODULE_LICENSE("GPL");
55
56 static struct dasd_discipline dasd_eckd_discipline;
57
58 /* The ccw bus type uses this table to find devices that it sends to
59  * dasd_eckd_probe */
60 static struct ccw_device_id dasd_eckd_ids[] = {
61         { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
62         { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
63         { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
64         { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
65         { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
66         { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
67         { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
68         { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
69         { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
70         { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
71         { /* end of list */ },
72 };
73
74 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
75
76 static struct ccw_driver dasd_eckd_driver; /* see below */
77
78 static void *rawpadpage;
79
80 #define INIT_CQR_OK 0
81 #define INIT_CQR_UNFORMATTED 1
82 #define INIT_CQR_ERROR 2
83
84 /* emergency request for reserve/release */
85 static struct {
86         struct dasd_ccw_req cqr;
87         struct ccw1 ccw;
88         char data[32];
89 } *dasd_reserve_req;
90 static DEFINE_MUTEX(dasd_reserve_mutex);
91
92 static struct {
93         struct dasd_ccw_req cqr;
94         struct ccw1 ccw[2];
95         char data[40];
96 } *dasd_vol_info_req;
97 static DEFINE_MUTEX(dasd_vol_info_mutex);
98
99 struct ext_pool_exhaust_work_data {
100         struct work_struct worker;
101         struct dasd_device *device;
102         struct dasd_device *base;
103 };
104
105 /* definitions for the path verification worker */
106 struct path_verification_work_data {
107         struct work_struct worker;
108         struct dasd_device *device;
109         struct dasd_ccw_req cqr;
110         struct ccw1 ccw;
111         __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
112         int isglobal;
113         __u8 tbvpm;
114 };
115 static struct path_verification_work_data *path_verification_worker;
116 static DEFINE_MUTEX(dasd_path_verification_mutex);
117
118 struct check_attention_work_data {
119         struct work_struct worker;
120         struct dasd_device *device;
121         __u8 lpum;
122 };
123
124 static int dasd_eckd_ext_pool_id(struct dasd_device *);
125 static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
126                         struct dasd_device *, struct dasd_device *,
127                         unsigned int, int, unsigned int, unsigned int,
128                         unsigned int, unsigned int);
129
130 /* Initial attempt at a probe function. This can be simplified once
131  * the other detection code is gone. */
132 static int
133 dasd_eckd_probe (struct ccw_device *cdev)
134 {
135         int ret;
136
137         /* set ECKD specific ccw-device options */
138         ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
139                                      CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
140         if (ret) {
141                 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
142                                 "dasd_eckd_probe: could not set "
143                                 "ccw-device options");
144                 return ret;
145         }
146         ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
147         return ret;
148 }
149
150 static int
151 dasd_eckd_set_online(struct ccw_device *cdev)
152 {
153         return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
154 }
155
156 static const int sizes_trk0[] = { 28, 148, 84 };
157 #define LABEL_SIZE 140
158
159 /* head and record addresses of count_area read in analysis ccw */
160 static const int count_area_head[] = { 0, 0, 0, 0, 1 };
161 static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
162
163 static inline unsigned int
164 ceil_quot(unsigned int d1, unsigned int d2)
165 {
166         return (d1 + (d2 - 1)) / d2;
167 }
168
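/*
 * Return the number of records with key length kl and data length dl that
 * fit on one track, using the device-type specific capacity formulas for
 * 3380, 3390 and 9345 devices; unknown device types return 0.
 */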
169 static unsigned int
170 recs_per_track(struct dasd_eckd_characteristics * rdc,
171                unsigned int kl, unsigned int dl)
172 {
173         int dn, kn;
174
175         switch (rdc->dev_type) {
176         case 0x3380:
177                 if (kl)
178                         return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
179                                        ceil_quot(dl + 12, 32));
180                 else
181                         return 1499 / (15 + ceil_quot(dl + 12, 32));
182         case 0x3390:
183                 dn = ceil_quot(dl + 6, 232) + 1;
184                 if (kl) {
185                         kn = ceil_quot(kl + 6, 232) + 1;
186                         return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
187                                        9 + ceil_quot(dl + 6 * dn, 34));
188                 } else
189                         return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
190         case 0x9345:
191                 dn = ceil_quot(dl + 6, 232) + 1;
192                 if (kl) {
193                         kn = ceil_quot(kl + 6, 232) + 1;
194                         return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
195                                        ceil_quot(dl + 6 * dn, 34));
196                 } else
197                         return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
198         }
199         return 0;
200 }
201
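/*
 * Pack a cylinder/head address into the CCHH-style ch_t: the low 16 bits of
 * the cylinder go into geo->cyl, any higher cylinder bits are folded into
 * the upper bits of geo->head, and the low 4 bits of geo->head hold the
 * head number (large-volume addressing).
 * Example: cyl = 0x12345, head = 7 gives geo->cyl = 0x2345 and
 * geo->head = 0x0017.
 */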
202 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
203 {
204         geo->cyl = (__u16) cyl;
205         geo->head = cyl >> 16;
206         geo->head <<= 4;
207         geo->head |= head;
208 }
209
210 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
211                      struct dasd_device *device)
212 {
213         struct dasd_eckd_private *private = device->private;
214         int rc;
215
216         rc = get_phys_clock(&data->ep_sys_time);
217         /*
218          * Ignore return code if XRC is not supported or
219          * sync clock is switched off
220          */
221         if ((rc && !private->rdc_data.facilities.XRC_supported) ||
222             rc == -EOPNOTSUPP || rc == -EACCES)
223                 return 0;
224
225         /* switch on System Time Stamp - needed for XRC Support */
226         data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
227         data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
228
229         if (ccw) {
230                 ccw->count = sizeof(struct DE_eckd_data);
231                 ccw->flags |= CCW_FLAG_SLI;
232         }
233
234         return rc;
235 }
236
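/*
 * Build the Define Extent parameter block for the track range trk..totrk:
 * set the permission mask and cache attributes according to the command,
 * add the XRC time stamp for write commands where supported, and compute
 * the extent begin/end track addresses; for sequential prestage/access the
 * end cylinder is extended by attrib.nr_cyl.
 */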
237 static int
238 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
239               unsigned int totrk, int cmd, struct dasd_device *device,
240               int blksize)
241 {
242         struct dasd_eckd_private *private = device->private;
243         u16 heads, beghead, endhead;
244         u32 begcyl, endcyl;
245         int rc = 0;
246
247         if (ccw) {
248                 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
249                 ccw->flags = 0;
250                 ccw->count = 16;
251                 ccw->cda = (__u32)__pa(data);
252         }
253
254         memset(data, 0, sizeof(struct DE_eckd_data));
255         switch (cmd) {
256         case DASD_ECKD_CCW_READ_HOME_ADDRESS:
257         case DASD_ECKD_CCW_READ_RECORD_ZERO:
258         case DASD_ECKD_CCW_READ:
259         case DASD_ECKD_CCW_READ_MT:
260         case DASD_ECKD_CCW_READ_CKD:
261         case DASD_ECKD_CCW_READ_CKD_MT:
262         case DASD_ECKD_CCW_READ_KD:
263         case DASD_ECKD_CCW_READ_KD_MT:
264                 data->mask.perm = 0x1;
265                 data->attributes.operation = private->attrib.operation;
266                 break;
267         case DASD_ECKD_CCW_READ_COUNT:
268                 data->mask.perm = 0x1;
269                 data->attributes.operation = DASD_BYPASS_CACHE;
270                 break;
271         case DASD_ECKD_CCW_READ_TRACK:
272         case DASD_ECKD_CCW_READ_TRACK_DATA:
273                 data->mask.perm = 0x1;
274                 data->attributes.operation = private->attrib.operation;
275                 data->blk_size = 0;
276                 break;
277         case DASD_ECKD_CCW_WRITE:
278         case DASD_ECKD_CCW_WRITE_MT:
279         case DASD_ECKD_CCW_WRITE_KD:
280         case DASD_ECKD_CCW_WRITE_KD_MT:
281                 data->mask.perm = 0x02;
282                 data->attributes.operation = private->attrib.operation;
283                 rc = set_timestamp(ccw, data, device);
284                 break;
285         case DASD_ECKD_CCW_WRITE_CKD:
286         case DASD_ECKD_CCW_WRITE_CKD_MT:
287                 data->attributes.operation = DASD_BYPASS_CACHE;
288                 rc = set_timestamp(ccw, data, device);
289                 break;
290         case DASD_ECKD_CCW_ERASE:
291         case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
292         case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
293                 data->mask.perm = 0x3;
294                 data->mask.auth = 0x1;
295                 data->attributes.operation = DASD_BYPASS_CACHE;
296                 rc = set_timestamp(ccw, data, device);
297                 break;
298         case DASD_ECKD_CCW_WRITE_FULL_TRACK:
299                 data->mask.perm = 0x03;
300                 data->attributes.operation = private->attrib.operation;
301                 data->blk_size = 0;
302                 break;
303         case DASD_ECKD_CCW_WRITE_TRACK_DATA:
304                 data->mask.perm = 0x02;
305                 data->attributes.operation = private->attrib.operation;
306                 data->blk_size = blksize;
307                 rc = set_timestamp(ccw, data, device);
308                 break;
309         default:
310                 dev_err(&device->cdev->dev,
311                         "0x%x is not a known command\n", cmd);
312                 break;
313         }
314
315         data->attributes.mode = 0x3;    /* ECKD */
316
317         if ((private->rdc_data.cu_type == 0x2105 ||
318              private->rdc_data.cu_type == 0x2107 ||
319              private->rdc_data.cu_type == 0x1750)
320             && !(private->uses_cdl && trk < 2))
321                 data->ga_extended |= 0x40; /* Regular Data Format Mode */
322
323         heads = private->rdc_data.trk_per_cyl;
324         begcyl = trk / heads;
325         beghead = trk % heads;
326         endcyl = totrk / heads;
327         endhead = totrk % heads;
328
329         /* check for sequential prestage - enhance cylinder range */
330         if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
331             data->attributes.operation == DASD_SEQ_ACCESS) {
332
333                 if (endcyl + private->attrib.nr_cyl < private->real_cyl)
334                         endcyl += private->attrib.nr_cyl;
335                 else
336                         endcyl = (private->real_cyl - 1);
337         }
338
339         set_ch_t(&data->beg_ext, begcyl, beghead);
340         set_ch_t(&data->end_ext, endcyl, endhead);
341         return rc;
342 }
343
344
345 static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
346                               unsigned int trk, unsigned int rec_on_trk,
347                               int count, int cmd, struct dasd_device *device,
348                               unsigned int reclen, unsigned int tlf)
349 {
350         struct dasd_eckd_private *private = device->private;
351         int sector;
352         int dn, d;
353
354         if (ccw) {
355                 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
356                 ccw->flags = 0;
357                 if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
358                         ccw->count = 22;
359                 else
360                         ccw->count = 20;
361                 ccw->cda = (__u32)__pa(data);
362         }
363
364         memset(data, 0, sizeof(*data));
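        /*
         * The sector value in the locate record data gives the (approximate)
         * rotational position of the first record on the track; it is
         * derived from the record number and record length with device-type
         * specific constants for 3390 and 3380, and left at 0 otherwise.
         */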
365         sector = 0;
366         if (rec_on_trk) {
367                 switch (private->rdc_data.dev_type) {
368                 case 0x3390:
369                         dn = ceil_quot(reclen + 6, 232);
370                         d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
371                         sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
372                         break;
373                 case 0x3380:
374                         d = 7 + ceil_quot(reclen + 12, 32);
375                         sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
376                         break;
377                 }
378         }
379         data->sector = sector;
380         /* note: the meaning of count depends on the operation;
381          *       for record based I/O it's the number of records, but for
382          *       track based I/O it's the number of tracks
383          */
384         data->count = count;
385         switch (cmd) {
386         case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
387                 data->operation.orientation = 0x3;
388                 data->operation.operation = 0x03;
389                 break;
390         case DASD_ECKD_CCW_READ_HOME_ADDRESS:
391                 data->operation.orientation = 0x3;
392                 data->operation.operation = 0x16;
393                 break;
394         case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
395                 data->operation.orientation = 0x1;
396                 data->operation.operation = 0x03;
397                 data->count++;
398                 break;
399         case DASD_ECKD_CCW_READ_RECORD_ZERO:
400                 data->operation.orientation = 0x3;
401                 data->operation.operation = 0x16;
402                 data->count++;
403                 break;
404         case DASD_ECKD_CCW_WRITE:
405         case DASD_ECKD_CCW_WRITE_MT:
406         case DASD_ECKD_CCW_WRITE_KD:
407         case DASD_ECKD_CCW_WRITE_KD_MT:
408                 data->auxiliary.length_valid = 0x1;
409                 data->length = reclen;
410                 data->operation.operation = 0x01;
411                 break;
412         case DASD_ECKD_CCW_WRITE_CKD:
413         case DASD_ECKD_CCW_WRITE_CKD_MT:
414                 data->auxiliary.length_valid = 0x1;
415                 data->length = reclen;
416                 data->operation.operation = 0x03;
417                 break;
418         case DASD_ECKD_CCW_WRITE_FULL_TRACK:
419                 data->operation.orientation = 0x0;
420                 data->operation.operation = 0x3F;
421                 data->extended_operation = 0x11;
422                 data->length = 0;
423                 data->extended_parameter_length = 0x02;
424                 if (data->count > 8) {
425                         data->extended_parameter[0] = 0xFF;
426                         data->extended_parameter[1] = 0xFF;
427                         data->extended_parameter[1] <<= (16 - count);
428                 } else {
429                         data->extended_parameter[0] = 0xFF;
430                         data->extended_parameter[0] <<= (8 - count);
431                         data->extended_parameter[1] = 0x00;
432                 }
433                 data->sector = 0xFF;
434                 break;
435         case DASD_ECKD_CCW_WRITE_TRACK_DATA:
436                 data->auxiliary.length_valid = 0x1;
437                 data->length = reclen;  /* not tlf, as one might think */
438                 data->operation.operation = 0x3F;
439                 data->extended_operation = 0x23;
440                 break;
441         case DASD_ECKD_CCW_READ:
442         case DASD_ECKD_CCW_READ_MT:
443         case DASD_ECKD_CCW_READ_KD:
444         case DASD_ECKD_CCW_READ_KD_MT:
445                 data->auxiliary.length_valid = 0x1;
446                 data->length = reclen;
447                 data->operation.operation = 0x06;
448                 break;
449         case DASD_ECKD_CCW_READ_CKD:
450         case DASD_ECKD_CCW_READ_CKD_MT:
451                 data->auxiliary.length_valid = 0x1;
452                 data->length = reclen;
453                 data->operation.operation = 0x16;
454                 break;
455         case DASD_ECKD_CCW_READ_COUNT:
456                 data->operation.operation = 0x06;
457                 break;
458         case DASD_ECKD_CCW_READ_TRACK:
459                 data->operation.orientation = 0x1;
460                 data->operation.operation = 0x0C;
461                 data->extended_parameter_length = 0;
462                 data->sector = 0xFF;
463                 break;
464         case DASD_ECKD_CCW_READ_TRACK_DATA:
465                 data->auxiliary.length_valid = 0x1;
466                 data->length = tlf;
467                 data->operation.operation = 0x0C;
468                 break;
469         case DASD_ECKD_CCW_ERASE:
470                 data->length = reclen;
471                 data->auxiliary.length_valid = 0x1;
472                 data->operation.operation = 0x0b;
473                 break;
474         default:
475                 DBF_DEV_EVENT(DBF_ERR, device,
476                             "fill LRE unknown opcode 0x%x", cmd);
477                 BUG();
478         }
479         set_ch_t(&data->seek_addr,
480                  trk / private->rdc_data.trk_per_cyl,
481                  trk % private->rdc_data.trk_per_cyl);
482         data->search_arg.cyl = data->seek_addr.cyl;
483         data->search_arg.head = data->seek_addr.head;
484         data->search_arg.record = rec_on_trk;
485 }
486
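/*
 * Build a PFX (Prefix) CCW: the prefix data combines the Define Extent
 * data and, for format 1, the Locate Record Extended data in a single
 * command.  The validity bits request base device verification for PAV and
 * Hyper PAV aliases, and the time stamp validity is propagated from the
 * define extent data when XRC is in use.
 */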
487 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
488                       unsigned int trk, unsigned int totrk, int cmd,
489                       struct dasd_device *basedev, struct dasd_device *startdev,
490                       unsigned int format, unsigned int rec_on_trk, int count,
491                       unsigned int blksize, unsigned int tlf)
492 {
493         struct dasd_eckd_private *basepriv, *startpriv;
494         struct LRE_eckd_data *lredata;
495         struct DE_eckd_data *dedata;
496         int rc = 0;
497
498         basepriv = basedev->private;
499         startpriv = startdev->private;
500         dedata = &pfxdata->define_extent;
501         lredata = &pfxdata->locate_record;
502
503         ccw->cmd_code = DASD_ECKD_CCW_PFX;
504         ccw->flags = 0;
505         if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
506                 ccw->count = sizeof(*pfxdata) + 2;
507                 ccw->cda = (__u32) __pa(pfxdata);
508                 memset(pfxdata, 0, sizeof(*pfxdata) + 2);
509         } else {
510                 ccw->count = sizeof(*pfxdata);
511                 ccw->cda = (__u32) __pa(pfxdata);
512                 memset(pfxdata, 0, sizeof(*pfxdata));
513         }
514
515         /* prefix data */
516         if (format > 1) {
517                 DBF_DEV_EVENT(DBF_ERR, basedev,
518                               "PFX LRE unknown format 0x%x", format);
519                 BUG();
520                 return -EINVAL;
521         }
522         pfxdata->format = format;
523         pfxdata->base_address = basepriv->ned->unit_addr;
524         pfxdata->base_lss = basepriv->ned->ID;
525         pfxdata->validity.define_extent = 1;
526
527         /* private uid is kept up to date, conf_data may be outdated */
528         if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
529                 pfxdata->validity.verify_base = 1;
530
531         if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
532                 pfxdata->validity.verify_base = 1;
533                 pfxdata->validity.hyper_pav = 1;
534         }
535
536         rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
537
538         /*
539          * For some commands the System Time Stamp is set in the define extent
540          * data when XRC is supported. The validity of the time stamp must be
541          * reflected in the prefix data as well.
542          */
543         if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
544                 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */
545
546         if (format == 1) {
547                 locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
548                                   basedev, blksize, tlf);
549         }
550
551         return rc;
552 }
553
554 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
555                   unsigned int trk, unsigned int totrk, int cmd,
556                   struct dasd_device *basedev, struct dasd_device *startdev)
557 {
558         return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
559                           0, 0, 0, 0, 0);
560 }
561
562 static void
563 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
564               unsigned int rec_on_trk, int no_rec, int cmd,
565               struct dasd_device * device, int reclen)
566 {
567         struct dasd_eckd_private *private = device->private;
568         int sector;
569         int dn, d;
570
571         DBF_DEV_EVENT(DBF_INFO, device,
572                   "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
573                   trk, rec_on_trk, no_rec, cmd, reclen);
574
575         ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
576         ccw->flags = 0;
577         ccw->count = 16;
578         ccw->cda = (__u32) __pa(data);
579
580         memset(data, 0, sizeof(struct LO_eckd_data));
581         sector = 0;
582         if (rec_on_trk) {
583                 switch (private->rdc_data.dev_type) {
584                 case 0x3390:
585                         dn = ceil_quot(reclen + 6, 232);
586                         d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
587                         sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
588                         break;
589                 case 0x3380:
590                         d = 7 + ceil_quot(reclen + 12, 32);
591                         sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
592                         break;
593                 }
594         }
595         data->sector = sector;
596         data->count = no_rec;
597         switch (cmd) {
598         case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
599                 data->operation.orientation = 0x3;
600                 data->operation.operation = 0x03;
601                 break;
602         case DASD_ECKD_CCW_READ_HOME_ADDRESS:
603                 data->operation.orientation = 0x3;
604                 data->operation.operation = 0x16;
605                 break;
606         case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
607                 data->operation.orientation = 0x1;
608                 data->operation.operation = 0x03;
609                 data->count++;
610                 break;
611         case DASD_ECKD_CCW_READ_RECORD_ZERO:
612                 data->operation.orientation = 0x3;
613                 data->operation.operation = 0x16;
614                 data->count++;
615                 break;
616         case DASD_ECKD_CCW_WRITE:
617         case DASD_ECKD_CCW_WRITE_MT:
618         case DASD_ECKD_CCW_WRITE_KD:
619         case DASD_ECKD_CCW_WRITE_KD_MT:
620                 data->auxiliary.last_bytes_used = 0x1;
621                 data->length = reclen;
622                 data->operation.operation = 0x01;
623                 break;
624         case DASD_ECKD_CCW_WRITE_CKD:
625         case DASD_ECKD_CCW_WRITE_CKD_MT:
626                 data->auxiliary.last_bytes_used = 0x1;
627                 data->length = reclen;
628                 data->operation.operation = 0x03;
629                 break;
630         case DASD_ECKD_CCW_READ:
631         case DASD_ECKD_CCW_READ_MT:
632         case DASD_ECKD_CCW_READ_KD:
633         case DASD_ECKD_CCW_READ_KD_MT:
634                 data->auxiliary.last_bytes_used = 0x1;
635                 data->length = reclen;
636                 data->operation.operation = 0x06;
637                 break;
638         case DASD_ECKD_CCW_READ_CKD:
639         case DASD_ECKD_CCW_READ_CKD_MT:
640                 data->auxiliary.last_bytes_used = 0x1;
641                 data->length = reclen;
642                 data->operation.operation = 0x16;
643                 break;
644         case DASD_ECKD_CCW_READ_COUNT:
645                 data->operation.operation = 0x06;
646                 break;
647         case DASD_ECKD_CCW_ERASE:
648                 data->length = reclen;
649                 data->auxiliary.last_bytes_used = 0x1;
650                 data->operation.operation = 0x0b;
651                 break;
652         default:
653                 DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
654                               "opcode 0x%x", cmd);
655         }
656         set_ch_t(&data->seek_addr,
657                  trk / private->rdc_data.trk_per_cyl,
658                  trk % private->rdc_data.trk_per_cyl);
659         data->search_arg.cyl = data->seek_addr.cyl;
660         data->search_arg.head = data->seek_addr.head;
661         data->search_arg.record = rec_on_trk;
662 }
663
664 /*
665  * Returns 1 if the block is one of the special blocks that need
666  * to be read/written with the KD variant of the command.
667  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
668  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
669  * Luckily the KD variants differ only by one bit (0x08) from the
670  * normal variant. So don't wonder about code like:
671  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
672  *         ccw->cmd_code |= 0x8;
673  */
674 static inline int
675 dasd_eckd_cdl_special(int blk_per_trk, int recid)
676 {
677         if (recid < 3)
678                 return 1;
679         if (recid < blk_per_trk)
680                 return 0;
681         if (recid < 2 * blk_per_trk)
682                 return 1;
683         return 0;
684 }
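/*
 * Example (assuming blk_per_trk = 12): recids 0..2 are the three special
 * records on track 0 (28, 148 and 84 bytes, see sizes_trk0), recids 3..11
 * are regular blocks, and recids 12..23 on track 1 are the 140 byte label
 * records (LABEL_SIZE), so dasd_eckd_cdl_special() returns 1 for 0..2 and
 * 12..23 only.
 */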
685
686 /*
687  * Returns the record size for the special blocks of the cdl format.
688  * Only returns something useful if dasd_eckd_cdl_special is true
689  * for the recid.
690  */
691 static inline int
692 dasd_eckd_cdl_reclen(int recid)
693 {
694         if (recid < 3)
695                 return sizes_trk0[recid];
696         return LABEL_SIZE;
697 }
698 /* create unique id from private structure. */
699 static void create_uid(struct dasd_eckd_private *private)
700 {
701         int count;
702         struct dasd_uid *uid;
703
704         uid = &private->uid;
705         memset(uid, 0, sizeof(struct dasd_uid));
706         memcpy(uid->vendor, private->ned->HDA_manufacturer,
707                sizeof(uid->vendor) - 1);
708         EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
709         memcpy(uid->serial, private->ned->HDA_location,
710                sizeof(uid->serial) - 1);
711         EBCASC(uid->serial, sizeof(uid->serial) - 1);
712         uid->ssid = private->gneq->subsystemID;
713         uid->real_unit_addr = private->ned->unit_addr;
714         if (private->sneq) {
715                 uid->type = private->sneq->sua_flags;
716                 if (uid->type == UA_BASE_PAV_ALIAS)
717                         uid->base_unit_addr = private->sneq->base_unit_addr;
718         } else {
719                 uid->type = UA_BASE_DEVICE;
720         }
721         if (private->vdsneq) {
722                 for (count = 0; count < 16; count++) {
723                         sprintf(uid->vduit+2*count, "%02x",
724                                 private->vdsneq->uit[count]);
725                 }
726         }
727 }
728
729 /*
730  * Generate device unique id that specifies the physical device.
731  */
732 static int dasd_eckd_generate_uid(struct dasd_device *device)
733 {
734         struct dasd_eckd_private *private = device->private;
735         unsigned long flags;
736
737         if (!private)
738                 return -ENODEV;
739         if (!private->ned || !private->gneq)
740                 return -ENODEV;
741         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
742         create_uid(private);
743         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
744         return 0;
745 }
746
747 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
748 {
749         struct dasd_eckd_private *private = device->private;
750         unsigned long flags;
751
752         if (private) {
753                 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
754                 *uid = private->uid;
755                 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
756                 return 0;
757         }
758         return -EINVAL;
759 }
760
761 /*
762  * compare device UID with data of a given dasd_eckd_private structure
763  * return 0 for match
764  */
765 static int dasd_eckd_compare_path_uid(struct dasd_device *device,
766                                       struct dasd_eckd_private *private)
767 {
768         struct dasd_uid device_uid;
769
770         create_uid(private);
771         dasd_eckd_get_uid(device, &device_uid);
772
773         return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
774 }
775
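/*
 * Set up a single-CCW Read Configuration Data request: read
 * DASD_ECKD_RCD_DATA_SIZE bytes into rcd_buffer on the logical path lpm,
 * with a short expiry and no block device attached, and mark the request
 * as a path verification request.
 */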
776 static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
777                                    struct dasd_ccw_req *cqr,
778                                    __u8 *rcd_buffer,
779                                    __u8 lpm)
780 {
781         struct ccw1 *ccw;
782         /*
783          * buffer has to start with EBCDIC "V1.0" to show
784          * support for virtual device SNEQ
785          */
786         rcd_buffer[0] = 0xE5;
787         rcd_buffer[1] = 0xF1;
788         rcd_buffer[2] = 0x4B;
789         rcd_buffer[3] = 0xF0;
790
791         ccw = cqr->cpaddr;
792         ccw->cmd_code = DASD_ECKD_CCW_RCD;
793         ccw->flags = 0;
794         ccw->cda = (__u32)(addr_t)rcd_buffer;
795         ccw->count = DASD_ECKD_RCD_DATA_SIZE;
796         cqr->magic = DASD_ECKD_MAGIC;
797
798         cqr->startdev = device;
799         cqr->memdev = device;
800         cqr->block = NULL;
801         cqr->expires = 10*HZ;
802         cqr->lpm = lpm;
803         cqr->retries = 256;
804         cqr->buildclk = get_tod_clock();
805         cqr->status = DASD_CQR_FILLED;
806         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
807 }
808
809 /*
810  * Wakeup helper for read_conf.
811  * If the cqr is not done and needs some error recovery,
812  * the buffer has to be re-initialized with the EBCDIC "V1.0"
813  * to show support for virtual device SNEQ.
814  */
815 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
816 {
817         struct ccw1 *ccw;
818         __u8 *rcd_buffer;
819
820         if (cqr->status !=  DASD_CQR_DONE) {
821                 ccw = cqr->cpaddr;
822                 rcd_buffer = (__u8 *)((addr_t) ccw->cda);
823                 memset(rcd_buffer, 0, sizeof(*rcd_buffer));
824
825                 rcd_buffer[0] = 0xE5;
826                 rcd_buffer[1] = 0xF1;
827                 rcd_buffer[2] = 0x4B;
828                 rcd_buffer[3] = 0xF0;
829         }
830         dasd_wakeup_cb(cqr, data);
831 }
832
833 static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
834                                            struct dasd_ccw_req *cqr,
835                                            __u8 *rcd_buffer,
836                                            __u8 lpm)
837 {
838         struct ciw *ciw;
839         int rc;
840         /*
841          * sanity check: scan for RCD command in extended SenseID data;
842          * some devices do not support RCD
843          */
844         ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
845         if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
846                 return -EOPNOTSUPP;
847
848         dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
849         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
850         set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
851         cqr->retries = 5;
852         cqr->callback = read_conf_cb;
853         rc = dasd_sleep_on_immediatly(cqr);
854         return rc;
855 }
856
857 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
858                                    void **rcd_buffer,
859                                    int *rcd_buffer_size, __u8 lpm)
860 {
861         struct ciw *ciw;
862         char *rcd_buf = NULL;
863         int ret;
864         struct dasd_ccw_req *cqr;
865
866         /*
867          * sanity check: scan for RCD command in extended SenseID data;
868          * some devices do not support RCD
869          */
870         ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
871         if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
872                 ret = -EOPNOTSUPP;
873                 goto out_error;
874         }
875         rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
876         if (!rcd_buf) {
877                 ret = -ENOMEM;
878                 goto out_error;
879         }
880         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
881                                    0, /* use rcd_buf as data area */
882                                    device, NULL);
883         if (IS_ERR(cqr)) {
884                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
885                               "Could not allocate RCD request");
886                 ret = -ENOMEM;
887                 goto out_error;
888         }
889         dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
890         cqr->callback = read_conf_cb;
891         ret = dasd_sleep_on(cqr);
892         /*
893          * on success we update the user input parms
894          */
895         dasd_sfree_request(cqr, cqr->memdev);
896         if (ret)
897                 goto out_error;
898
899         *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
900         *rcd_buffer = rcd_buf;
901         return 0;
902 out_error:
903         kfree(rcd_buf);
904         *rcd_buffer = NULL;
905         *rcd_buffer_size = 0;
906         return ret;
907 }
908
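/*
 * Walk the configuration data record by record and remember the pointers
 * to the SNEQ (identifier 1, format 1), the virtual device SNEQ (format 4),
 * the GNEQ (identifier 2) and the NED (identifier 3); both NED and GNEQ
 * must be present, otherwise all pointers are cleared again and -EINVAL is
 * returned.
 */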
909 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
910 {
911
912         struct dasd_sneq *sneq;
913         int i, count;
914
915         private->ned = NULL;
916         private->sneq = NULL;
917         private->vdsneq = NULL;
918         private->gneq = NULL;
919         count = private->conf_len / sizeof(struct dasd_sneq);
920         sneq = (struct dasd_sneq *)private->conf_data;
921         for (i = 0; i < count; ++i) {
922                 if (sneq->flags.identifier == 1 && sneq->format == 1)
923                         private->sneq = sneq;
924                 else if (sneq->flags.identifier == 1 && sneq->format == 4)
925                         private->vdsneq = (struct vd_sneq *)sneq;
926                 else if (sneq->flags.identifier == 2)
927                         private->gneq = (struct dasd_gneq *)sneq;
928                 else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
929                         private->ned = (struct dasd_ned *)sneq;
930                 sneq++;
931         }
932         if (!private->ned || !private->gneq) {
933                 private->ned = NULL;
934                 private->sneq = NULL;
935                 private->vdsneq = NULL;
936                 private->gneq = NULL;
937                 return -EINVAL;
938         }
939         return 0;
940
941 };
942
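/*
 * Extract the path access information from the configuration data: find the
 * GNEQ and return the low three bits of its byte 18.  The callers use the
 * result to classify a channel path as non-preferred (0x02) or preferred
 * (0x03).
 */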
943 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
944 {
945         struct dasd_gneq *gneq;
946         int i, count, found;
947
948         count = conf_len / sizeof(*gneq);
949         gneq = (struct dasd_gneq *)conf_data;
950         found = 0;
951         for (i = 0; i < count; ++i) {
952                 if (gneq->flags.identifier == 2) {
953                         found = 1;
954                         break;
955                 }
956                 gneq++;
957         }
958         if (found)
959                 return ((char *)gneq)[18] & 0x07;
960         else
961                 return 0;
962 }
963
964 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
965 {
966         struct dasd_eckd_private *private = device->private;
967         int i;
968
969         private->conf_data = NULL;
970         private->conf_len = 0;
971         for (i = 0; i < 8; i++) {
972                 kfree(device->path[i].conf_data);
973                 device->path[i].conf_data = NULL;
974                 device->path[i].cssid = 0;
975                 device->path[i].ssid = 0;
976                 device->path[i].chpid = 0;
977         }
978 }
979
980
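/*
 * Read the configuration data on every operational channel path.  The first
 * valid set becomes the device's conf_data and is used to generate the
 * device UID; every further path must report the same UID, otherwise it is
 * reported as miscabled and added to the cable path mask.  Per path the
 * conf_data, cssid, ssid and chpid are stored and the path is classified as
 * (non-)preferred and added to the operational path mask.
 */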
981 static int dasd_eckd_read_conf(struct dasd_device *device)
982 {
983         void *conf_data;
984         int conf_len, conf_data_saved;
985         int rc, path_err, pos;
986         __u8 lpm, opm;
987         struct dasd_eckd_private *private, path_private;
988         struct dasd_uid *uid;
989         char print_path_uid[60], print_device_uid[60];
990         struct channel_path_desc_fmt0 *chp_desc;
991         struct subchannel_id sch_id;
992
993         private = device->private;
994         opm = ccw_device_get_path_mask(device->cdev);
995         ccw_device_get_schid(device->cdev, &sch_id);
996         conf_data_saved = 0;
997         path_err = 0;
998         /* get configuration data per operational path */
999         for (lpm = 0x80; lpm; lpm>>= 1) {
1000                 if (!(lpm & opm))
1001                         continue;
1002                 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
1003                                              &conf_len, lpm);
1004                 if (rc && rc != -EOPNOTSUPP) {  /* -EOPNOTSUPP is ok */
1005                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1006                                         "Read configuration data returned "
1007                                         "error %d", rc);
1008                         return rc;
1009                 }
1010                 if (conf_data == NULL) {
1011                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1012                                         "No configuration data "
1013                                         "retrieved");
1014                         /* no further analysis possible */
1015                         dasd_path_add_opm(device, opm);
1016                         continue;       /* no error */
1017                 }
1018                 /* save first valid configuration data */
1019                 if (!conf_data_saved) {
1020                         /* initially clear previously stored conf_data */
1021                         dasd_eckd_clear_conf_data(device);
1022                         private->conf_data = conf_data;
1023                         private->conf_len = conf_len;
1024                         if (dasd_eckd_identify_conf_parts(private)) {
1025                                 private->conf_data = NULL;
1026                                 private->conf_len = 0;
1027                                 kfree(conf_data);
1028                                 continue;
1029                         }
1030                         pos = pathmask_to_pos(lpm);
1031                         /* store per path conf_data */
1032                         device->path[pos].conf_data = conf_data;
1033                         device->path[pos].cssid = sch_id.cssid;
1034                         device->path[pos].ssid = sch_id.ssid;
1035                         chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
1036                         if (chp_desc)
1037                                 device->path[pos].chpid = chp_desc->chpid;
1038                         kfree(chp_desc);
1039                         /*
1040                          * build the device UID so that other path data
1041                          * can be compared to it
1042                          */
1043                         dasd_eckd_generate_uid(device);
1044                         conf_data_saved++;
1045                 } else {
1046                         path_private.conf_data = conf_data;
1047                         path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1048                         if (dasd_eckd_identify_conf_parts(
1049                                     &path_private)) {
1050                                 path_private.conf_data = NULL;
1051                                 path_private.conf_len = 0;
1052                                 kfree(conf_data);
1053                                 continue;
1054                         }
1055                         if (dasd_eckd_compare_path_uid(
1056                                     device, &path_private)) {
1057                                 uid = &path_private.uid;
1058                                 if (strlen(uid->vduit) > 0)
1059                                         snprintf(print_path_uid,
1060                                                  sizeof(print_path_uid),
1061                                                  "%s.%s.%04x.%02x.%s",
1062                                                  uid->vendor, uid->serial,
1063                                                  uid->ssid, uid->real_unit_addr,
1064                                                  uid->vduit);
1065                                 else
1066                                         snprintf(print_path_uid,
1067                                                  sizeof(print_path_uid),
1068                                                  "%s.%s.%04x.%02x",
1069                                                  uid->vendor, uid->serial,
1070                                                  uid->ssid,
1071                                                  uid->real_unit_addr);
1072                                 uid = &private->uid;
1073                                 if (strlen(uid->vduit) > 0)
1074                                         snprintf(print_device_uid,
1075                                                  sizeof(print_device_uid),
1076                                                  "%s.%s.%04x.%02x.%s",
1077                                                  uid->vendor, uid->serial,
1078                                                  uid->ssid, uid->real_unit_addr,
1079                                                  uid->vduit);
1080                                 else
1081                                         snprintf(print_device_uid,
1082                                                  sizeof(print_device_uid),
1083                                                  "%s.%s.%04x.%02x",
1084                                                  uid->vendor, uid->serial,
1085                                                  uid->ssid,
1086                                                  uid->real_unit_addr);
1087                                 dev_err(&device->cdev->dev,
1088                                         "Not all channel paths lead to "
1089                                         "the same device, path %02X leads to "
1090                                         "device %s instead of %s\n", lpm,
1091                                         print_path_uid, print_device_uid);
1092                                 path_err = -EINVAL;
1093                                 dasd_path_add_cablepm(device, lpm);
1094                                 continue;
1095                         }
1096                         pos = pathmask_to_pos(lpm);
1097                         /* store per path conf_data */
1098                         device->path[pos].conf_data = conf_data;
1099                         device->path[pos].cssid = sch_id.cssid;
1100                         device->path[pos].ssid = sch_id.ssid;
1101                         chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
1102                         if (chp_desc)
1103                                 device->path[pos].chpid = chp_desc->chpid;
1104                         kfree(chp_desc);
1105                         path_private.conf_data = NULL;
1106                         path_private.conf_len = 0;
1107                 }
1108                 switch (dasd_eckd_path_access(conf_data, conf_len)) {
1109                 case 0x02:
1110                         dasd_path_add_nppm(device, lpm);
1111                         break;
1112                 case 0x03:
1113                         dasd_path_add_ppm(device, lpm);
1114                         break;
1115                 }
1116                 if (!dasd_path_get_opm(device)) {
1117                         dasd_path_set_opm(device, lpm);
1118                         dasd_generic_path_operational(device);
1119                 } else {
1120                         dasd_path_add_opm(device, lpm);
1121                 }
1122         }
1123
1124         return path_err;
1125 }
1126
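/*
 * Determine the maximum data size for transport mode (FCX/zHPF) requests.
 * Transport mode must be advertised in the css general characteristics, in
 * the GNEQ and in the device features; the limit is then the channel's mdc
 * value scaled by FCX_MAX_DATA_FACTOR.  A return value of 0 means transport
 * mode is not used.
 */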
1127 static u32 get_fcx_max_data(struct dasd_device *device)
1128 {
1129         struct dasd_eckd_private *private = device->private;
1130         int fcx_in_css, fcx_in_gneq, fcx_in_features;
1131         int tpm, mdc;
1132
1133         if (dasd_nofcx)
1134                 return 0;
1135         /* is transport mode supported? */
1136         fcx_in_css = css_general_characteristics.fcx;
1137         fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1138         fcx_in_features = private->features.feature[40] & 0x80;
1139         tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1140
1141         if (!tpm)
1142                 return 0;
1143
1144         mdc = ccw_device_get_mdc(device->cdev, 0);
1145         if (mdc < 0) {
1146                 dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1147                 return 0;
1148         } else {
1149                 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1150         }
1151 }
1152
1153 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1154 {
1155         struct dasd_eckd_private *private = device->private;
1156         int mdc;
1157         u32 fcx_max_data;
1158
1159         if (private->fcx_max_data) {
1160                 mdc = ccw_device_get_mdc(device->cdev, lpm);
1161                 if (mdc < 0) {
1162                         dev_warn(&device->cdev->dev,
1163                                  "Detecting the maximum data size for zHPF "
1164                                  "requests failed (rc=%d) for a new path %x\n",
1165                                  mdc, lpm);
1166                         return mdc;
1167                 }
1168                 fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
1169                 if (fcx_max_data < private->fcx_max_data) {
1170                         dev_warn(&device->cdev->dev,
1171                                  "The maximum data size for zHPF requests %u "
1172                                  "on a new path %x is below the active maximum "
1173                                  "%u\n", fcx_max_data, lpm,
1174                                  private->fcx_max_data);
1175                         return -EACCES;
1176                 }
1177         }
1178         return 0;
1179 }
1180
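/*
 * Re-read the configuration data on the operational paths, stopping at the
 * first path that delivers valid data, and regenerate the device UID from
 * it.  Used by path verification when a path UID mismatch indicates that
 * the device behind the paths may have changed.
 */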
1181 static int rebuild_device_uid(struct dasd_device *device,
1182                               struct path_verification_work_data *data)
1183 {
1184         struct dasd_eckd_private *private = device->private;
1185         __u8 lpm, opm = dasd_path_get_opm(device);
1186         int rc = -ENODEV;
1187
1188         for (lpm = 0x80; lpm; lpm >>= 1) {
1189                 if (!(lpm & opm))
1190                         continue;
1191                 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1192                 memset(&data->cqr, 0, sizeof(data->cqr));
1193                 data->cqr.cpaddr = &data->ccw;
1194                 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1195                                                      data->rcd_buffer,
1196                                                      lpm);
1197
1198                 if (rc) {
1199                         if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
1200                                 continue;
1201                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1202                                         "Read configuration data "
1203                                         "returned error %d", rc);
1204                         break;
1205                 }
1206                 memcpy(private->conf_data, data->rcd_buffer,
1207                        DASD_ECKD_RCD_DATA_SIZE);
1208                 if (dasd_eckd_identify_conf_parts(private)) {
1209                         rc = -ENODEV;
1210                 } else /* first valid path is enough */
1211                         break;
1212         }
1213
1214         if (!rc)
1215                 rc = dasd_eckd_generate_uid(device);
1216
1217         return rc;
1218 }
1219
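/*
 * Worker for path verification: for every path in tbvpm re-read the
 * configuration data, check the zHPF maximum data size and compare the path
 * UID with the device UID.  Paths are sorted into the operational,
 * non-preferred, preferred, "verify again later" (epm), miscabled (cablepm)
 * and no-HPF (hpfpm) masks, which are then merged into the device path
 * masks under the ccwdev lock.
 */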
1220 static void do_path_verification_work(struct work_struct *work)
1221 {
1222         struct path_verification_work_data *data;
1223         struct dasd_device *device;
1224         struct dasd_eckd_private path_private;
1225         struct dasd_uid *uid;
1226         __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
1227         __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
1228         unsigned long flags;
1229         char print_uid[60];
1230         int rc;
1231
1232         data = container_of(work, struct path_verification_work_data, worker);
1233         device = data->device;
1234
1235         /* delay path verification until device was resumed */
1236         if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
1237                 schedule_work(work);
1238                 return;
1239         }
1240         /* check if path verification already running and delay if so */
1241         if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
1242                 schedule_work(work);
1243                 return;
1244         }
1245         opm = 0;
1246         npm = 0;
1247         ppm = 0;
1248         epm = 0;
1249         hpfpm = 0;
1250         cablepm = 0;
1251
1252         for (lpm = 0x80; lpm; lpm >>= 1) {
1253                 if (!(lpm & data->tbvpm))
1254                         continue;
1255                 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1256                 memset(&data->cqr, 0, sizeof(data->cqr));
1257                 data->cqr.cpaddr = &data->ccw;
1258                 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1259                                                      data->rcd_buffer,
1260                                                      lpm);
1261                 if (!rc) {
1262                         switch (dasd_eckd_path_access(data->rcd_buffer,
1263                                                       DASD_ECKD_RCD_DATA_SIZE)
1264                                 ) {
1265                         case 0x02:
1266                                 npm |= lpm;
1267                                 break;
1268                         case 0x03:
1269                                 ppm |= lpm;
1270                                 break;
1271                         }
1272                         opm |= lpm;
1273                 } else if (rc == -EOPNOTSUPP) {
1274                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1275                                         "path verification: No configuration "
1276                                         "data retrieved");
1277                         opm |= lpm;
1278                 } else if (rc == -EAGAIN) {
1279                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1280                                         "path verification: device is stopped,"
1281                                         " try again later");
1282                         epm |= lpm;
1283                 } else {
1284                         dev_warn(&device->cdev->dev,
1285                                  "Reading device feature codes failed "
1286                                  "(rc=%d) for new path %x\n", rc, lpm);
1287                         continue;
1288                 }
1289                 if (verify_fcx_max_data(device, lpm)) {
1290                         opm &= ~lpm;
1291                         npm &= ~lpm;
1292                         ppm &= ~lpm;
1293                         hpfpm |= lpm;
1294                         continue;
1295                 }
1296
1297                 /*
1298                  * save conf_data for the comparison below, since
1299                  * rebuild_device_uid may have changed
1300                  * the original data
1301                  */
1302                 memcpy(&path_rcd_buf, data->rcd_buffer,
1303                        DASD_ECKD_RCD_DATA_SIZE);
1304                 path_private.conf_data = (void *) &path_rcd_buf;
1305                 path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1306                 if (dasd_eckd_identify_conf_parts(&path_private)) {
1307                         path_private.conf_data = NULL;
1308                         path_private.conf_len = 0;
1309                         continue;
1310                 }
1311
1312                 /*
1313                  * compare path UID with device UID only if at least
1314                  * one valid path is left;
1315                  * otherwise the device UID may have changed and
1316                  * the first working path UID will be used as the device UID
1317                  */
1318                 if (dasd_path_get_opm(device) &&
1319                     dasd_eckd_compare_path_uid(device, &path_private)) {
1320                         /*
1321                          * the comparison was not successful;
1322                          * rebuild the device UID with at least one
1323                          * known path in case a z/VM hyperswap command
1324                          * has changed the device
1325                          *
1326                          * after this, compare again
1327                          *
1328                          * if either the rebuild or the recompare fails,
1329                          * the path cannot be used
1330                          */
1331                         if (rebuild_device_uid(device, data) ||
1332                             dasd_eckd_compare_path_uid(
1333                                     device, &path_private)) {
1334                                 uid = &path_private.uid;
1335                                 if (strlen(uid->vduit) > 0)
1336                                         snprintf(print_uid, sizeof(print_uid),
1337                                                  "%s.%s.%04x.%02x.%s",
1338                                                  uid->vendor, uid->serial,
1339                                                  uid->ssid, uid->real_unit_addr,
1340                                                  uid->vduit);
1341                                 else
1342                                         snprintf(print_uid, sizeof(print_uid),
1343                                                  "%s.%s.%04x.%02x",
1344                                                  uid->vendor, uid->serial,
1345                                                  uid->ssid,
1346                                                  uid->real_unit_addr);
1347                                 dev_err(&device->cdev->dev,
1348                                         "The newly added channel path %02X "
1349                                         "will not be used because it leads "
1350                                         "to a different device %s\n",
1351                                         lpm, print_uid);
1352                                 opm &= ~lpm;
1353                                 npm &= ~lpm;
1354                                 ppm &= ~lpm;
1355                                 cablepm |= lpm;
1356                                 continue;
1357                         }
1358                 }
1359
1360                 /*
1361                  * There is a small chance that a path is lost again between
1362                  * above path verification and the following modification of
1363                  * the device opm mask. We could avoid that race here by using
1364                  * yet another path mask, but we rather deal with this unlikely
1365                  * situation in dasd_start_IO.
1366                  */
1367                 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1368                 if (!dasd_path_get_opm(device) && opm) {
1369                         dasd_path_set_opm(device, opm);
1370                         dasd_generic_path_operational(device);
1371                 } else {
1372                         dasd_path_add_opm(device, opm);
1373                 }
1374                 dasd_path_add_nppm(device, npm);
1375                 dasd_path_add_ppm(device, ppm);
1376                 dasd_path_add_tbvpm(device, epm);
1377                 dasd_path_add_cablepm(device, cablepm);
1378                 dasd_path_add_nohpfpm(device, hpfpm);
1379                 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1380         }
1381         clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
1382         dasd_put_device(device);
1383         if (data->isglobal)
1384                 mutex_unlock(&dasd_path_verification_mutex);
1385         else
1386                 kfree(data);
1387 }
1388
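/*
 * Schedule verification of the paths given in lpm. The work data is
 * allocated with GFP_ATOMIC; if that fails, the static fallback buffer is
 * used under dasd_path_verification_mutex.
 */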
1389 static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
1390 {
1391         struct path_verification_work_data *data;
1392
1393         data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1394         if (!data) {
1395                 if (mutex_trylock(&dasd_path_verification_mutex)) {
1396                         data = path_verification_worker;
1397                         data->isglobal = 1;
1398                 } else
1399                         return -ENOMEM;
1400         } else {
1401                 memset(data, 0, sizeof(*data));
1402                 data->isglobal = 0;
1403         }
1404         INIT_WORK(&data->worker, do_path_verification_work);
1405         dasd_get_device(device);
1406         data->device = device;
1407         data->tbvpm = lpm;
1408         schedule_work(&data->worker);
1409         return 0;
1410 }
1411
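/*
 * Trigger re-verification of the paths given in pm, or of all currently
 * not-operational paths if no mask is given.
 */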
1412 static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
1413 {
1414         struct dasd_eckd_private *private = device->private;
1415         unsigned long flags;
1416
1417         if (!private->fcx_max_data)
1418                 private->fcx_max_data = get_fcx_max_data(device);
1419         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1420         dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
1421         dasd_schedule_device_bh(device);
1422         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1423 }
1424
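/*
 * Read the feature codes of the storage server (PSF/RSSD, suborder 0x41)
 * and cache them in the device private data.
 */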
1425 static int dasd_eckd_read_features(struct dasd_device *device)
1426 {
1427         struct dasd_eckd_private *private = device->private;
1428         struct dasd_psf_prssd_data *prssdp;
1429         struct dasd_rssd_features *features;
1430         struct dasd_ccw_req *cqr;
1431         struct ccw1 *ccw;
1432         int rc;
1433
1434         memset(&private->features, 0, sizeof(struct dasd_rssd_features));
1435         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1436                                    (sizeof(struct dasd_psf_prssd_data) +
1437                                     sizeof(struct dasd_rssd_features)),
1438                                    device, NULL);
1439         if (IS_ERR(cqr)) {
1440                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
1441                                 "allocate initialization request");
1442                 return PTR_ERR(cqr);
1443         }
1444         cqr->startdev = device;
1445         cqr->memdev = device;
1446         cqr->block = NULL;
1447         cqr->retries = 256;
1448         cqr->expires = 10 * HZ;
1449
1450         /* Prepare for Read Subsystem Data */
1451         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1452         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
1453         prssdp->order = PSF_ORDER_PRSSD;
1454         prssdp->suborder = 0x41;        /* Read Feature Codes */
1455         /* all other bytes of prssdp must be zero */
1456
1457         ccw = cqr->cpaddr;
1458         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1459         ccw->count = sizeof(struct dasd_psf_prssd_data);
1460         ccw->flags |= CCW_FLAG_CC;
1461         ccw->cda = (__u32)(addr_t) prssdp;
1462
1463         /* Read Subsystem Data - feature codes */
1464         features = (struct dasd_rssd_features *) (prssdp + 1);
1465         memset(features, 0, sizeof(struct dasd_rssd_features));
1466
1467         ccw++;
1468         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1469         ccw->count = sizeof(struct dasd_rssd_features);
1470         ccw->cda = (__u32)(addr_t) features;
1471
1472         cqr->buildclk = get_tod_clock();
1473         cqr->status = DASD_CQR_FILLED;
1474         rc = dasd_sleep_on(cqr);
1475         if (rc == 0) {
1476                 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1477                 features = (struct dasd_rssd_features *) (prssdp + 1);
1478                 memcpy(&private->features, features,
1479                        sizeof(struct dasd_rssd_features));
1480         } else
1481                 dev_warn(&device->cdev->dev, "Reading device feature codes"
1482                          " failed with rc=%d\n", rc);
1483         dasd_sfree_request(cqr, cqr->memdev);
1484         return rc;
1485 }
1486
1487 /* Read Volume Information - Volume Storage Query */
1488 static int dasd_eckd_read_vol_info(struct dasd_device *device)
1489 {
1490         struct dasd_eckd_private *private = device->private;
1491         struct dasd_psf_prssd_data *prssdp;
1492         struct dasd_rssd_vsq *vsq;
1493         struct dasd_ccw_req *cqr;
1494         struct ccw1 *ccw;
1495         int useglobal;
1496         int rc;
1497
1498         /* This command cannot be executed on an alias device */
1499         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1500             private->uid.type == UA_HYPER_PAV_ALIAS)
1501                 return 0;
1502
1503         useglobal = 0;
1504         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1505                                    sizeof(*prssdp) + sizeof(*vsq), device, NULL);
1506         if (IS_ERR(cqr)) {
1507                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1508                                 "Could not allocate initialization request");
1509                 mutex_lock(&dasd_vol_info_mutex);
1510                 useglobal = 1;
1511                 cqr = &dasd_vol_info_req->cqr;
1512                 memset(cqr, 0, sizeof(*cqr));
1513                 memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
1514                 cqr->cpaddr = &dasd_vol_info_req->ccw;
1515                 cqr->data = &dasd_vol_info_req->data;
1516                 cqr->magic = DASD_ECKD_MAGIC;
1517         }
1518
1519         /* Prepare for Read Subsystem Data */
1520         prssdp = cqr->data;
1521         prssdp->order = PSF_ORDER_PRSSD;
1522         prssdp->suborder = PSF_SUBORDER_VSQ;    /* Volume Storage Query */
1523         prssdp->lss = private->ned->ID;
1524         prssdp->volume = private->ned->unit_addr;
1525
1526         ccw = cqr->cpaddr;
1527         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1528         ccw->count = sizeof(*prssdp);
1529         ccw->flags |= CCW_FLAG_CC;
1530         ccw->cda = (__u32)(addr_t)prssdp;
1531
1532         /* Read Subsystem Data - Volume Storage Query */
1533         vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
1534         memset(vsq, 0, sizeof(*vsq));
1535
1536         ccw++;
1537         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1538         ccw->count = sizeof(*vsq);
1539         ccw->flags |= CCW_FLAG_SLI;
1540         ccw->cda = (__u32)(addr_t)vsq;
1541
1542         cqr->buildclk = get_tod_clock();
1543         cqr->status = DASD_CQR_FILLED;
1544         cqr->startdev = device;
1545         cqr->memdev = device;
1546         cqr->block = NULL;
1547         cqr->retries = 256;
1548         cqr->expires = device->default_expires * HZ;
1549         /* The command might not be supported. Suppress the error output */
1550         __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1551
1552         rc = dasd_sleep_on_interruptible(cqr);
1553         if (rc == 0) {
1554                 memcpy(&private->vsq, vsq, sizeof(*vsq));
1555         } else {
1556                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1557                                 "Reading the volume storage information failed with rc=%d", rc);
1558         }
1559
1560         if (useglobal)
1561                 mutex_unlock(&dasd_vol_info_mutex);
1562         else
1563                 dasd_sfree_request(cqr, cqr->memdev);
1564
1565         return rc;
1566 }
1567
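/* Return whether the volume is an ESE (thin-provisioned) volume. */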
1568 static int dasd_eckd_is_ese(struct dasd_device *device)
1569 {
1570         struct dasd_eckd_private *private = device->private;
1571
1572         return private->vsq.vol_info.ese;
1573 }
1574
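/* Return the ID of the extent pool the volume is allocated from. */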
1575 static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1576 {
1577         struct dasd_eckd_private *private = device->private;
1578
1579         return private->vsq.extent_pool_id;
1580 }
1581
1582 /*
1583  * This value represents the total amount of available space. As more space is
1584  * allocated by ESE volumes, this value will decrease.
1585  * The data for this value is therefore updated on any call.
1586  */
1587 static int dasd_eckd_space_configured(struct dasd_device *device)
1588 {
1589         struct dasd_eckd_private *private = device->private;
1590         int rc;
1591
1592         rc = dasd_eckd_read_vol_info(device);
1593
1594         return rc ? : private->vsq.space_configured;
1595 }
1596
1597 /*
1598  * The value of space allocated by an ESE volume may have changed and is
1599  * therefore updated on any call.
1600  */
1601 static int dasd_eckd_space_allocated(struct dasd_device *device)
1602 {
1603         struct dasd_eckd_private *private = device->private;
1604         int rc;
1605
1606         rc = dasd_eckd_read_vol_info(device);
1607
1608         return rc ? : private->vsq.space_allocated;
1609 }
1610
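/* Return the logical capacity reported by the Volume Storage Query. */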
1611 static int dasd_eckd_logical_capacity(struct dasd_device *device)
1612 {
1613         struct dasd_eckd_private *private = device->private;
1614
1615         return private->vsq.logical_capacity;
1616 }
1617
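/*
 * Worker for an exhausted extent pool: re-read the configured space of the
 * base device and signal that space is available again, or warn that the
 * pool is still full.
 */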
1618 static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
1619 {
1620         struct ext_pool_exhaust_work_data *data;
1621         struct dasd_device *device;
1622         struct dasd_device *base;
1623
1624         data = container_of(work, struct ext_pool_exhaust_work_data, worker);
1625         device = data->device;
1626         base = data->base;
1627
1628         if (!base)
1629                 base = device;
1630         if (dasd_eckd_space_configured(base) != 0) {
1631                 dasd_generic_space_avail(device);
1632         } else {
1633                 dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
1634                 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
1635         }
1636
1637         dasd_put_device(device);
1638         kfree(data);
1639 }
1640
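/*
 * Schedule the extent pool exhaustion worker for the base device of the
 * failed request.
 */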
1641 static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
1642                                       struct dasd_ccw_req *cqr)
1643 {
1644         struct ext_pool_exhaust_work_data *data;
1645
1646         data = kzalloc(sizeof(*data), GFP_ATOMIC);
1647         if (!data)
1648                 return -ENOMEM;
1649         INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
1650         dasd_get_device(device);
1651         data->device = device;
1652
1653         if (cqr->block)
1654                 data->base = cqr->block->base;
1655         else if (cqr->basedev)
1656                 data->base = cqr->basedev;
1657         else
1658                 data->base = NULL;
1659
1660         schedule_work(&data->worker);
1661
1662         return 0;
1663 }
1664
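/*
 * Copy the extent pool summary matching the volume's pool ID from the
 * Logical Configuration Query data into the device private data.
 */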
1665 static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1666                                         struct dasd_rssd_lcq *lcq)
1667 {
1668         struct dasd_eckd_private *private = device->private;
1669         int pool_id = dasd_eckd_ext_pool_id(device);
1670         struct dasd_ext_pool_sum eps;
1671         int i;
1672
1673         for (i = 0; i < lcq->pool_count; i++) {
1674                 eps = lcq->ext_pool_sum[i];
1675                 if (eps.pool_id == pool_id) {
1676                         memcpy(&private->eps, &eps,
1677                                sizeof(struct dasd_ext_pool_sum));
1678                 }
1679         }
1680 }
1681
1682 /* Read Extent Pool Information - Logical Configuration Query */
1683 static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
1684 {
1685         struct dasd_eckd_private *private = device->private;
1686         struct dasd_psf_prssd_data *prssdp;
1687         struct dasd_rssd_lcq *lcq;
1688         struct dasd_ccw_req *cqr;
1689         struct ccw1 *ccw;
1690         int rc;
1691
1692         /* This command cannot be executed on an alias device */
1693         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1694             private->uid.type == UA_HYPER_PAV_ALIAS)
1695                 return 0;
1696
1697         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1698                                    sizeof(*prssdp) + sizeof(*lcq), device, NULL);
1699         if (IS_ERR(cqr)) {
1700                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1701                                 "Could not allocate initialization request");
1702                 return PTR_ERR(cqr);
1703         }
1704
1705         /* Prepare for Read Subsystem Data */
1706         prssdp = cqr->data;
1707         memset(prssdp, 0, sizeof(*prssdp));
1708         prssdp->order = PSF_ORDER_PRSSD;
1709         prssdp->suborder = PSF_SUBORDER_LCQ;    /* Logical Configuration Query */
1710
1711         ccw = cqr->cpaddr;
1712         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1713         ccw->count = sizeof(*prssdp);
1714         ccw->flags |= CCW_FLAG_CC;
1715         ccw->cda = (__u32)(addr_t)prssdp;
1716
1717         lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
1718         memset(lcq, 0, sizeof(*lcq));
1719
1720         ccw++;
1721         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1722         ccw->count = sizeof(*lcq);
1723         ccw->flags |= CCW_FLAG_SLI;
1724         ccw->cda = (__u32)(addr_t)lcq;
1725
1726         cqr->buildclk = get_tod_clock();
1727         cqr->status = DASD_CQR_FILLED;
1728         cqr->startdev = device;
1729         cqr->memdev = device;
1730         cqr->block = NULL;
1731         cqr->retries = 256;
1732         cqr->expires = device->default_expires * HZ;
1733         /* The command might not be supported. Suppress the error output */
1734         __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1735
1736         rc = dasd_sleep_on_interruptible(cqr);
1737         if (rc == 0) {
1738                 dasd_eckd_cpy_ext_pool_data(device, lcq);
1739         } else {
1740                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1741                                 "Reading the logical configuration failed with rc=%d", rc);
1742         }
1743
1744         dasd_sfree_request(cqr, cqr->memdev);
1745
1746         return rc;
1747 }
1748
1749 /*
1750  * Depending on the device type, the extent size is specified either as
1751  * cylinders per extent (CKD) or size per extent (FBA).
1752  * A 1 GB extent corresponds to 1113 cyl, and a 16 MB extent to 21 cyl.
1753  */
1754 static int dasd_eckd_ext_size(struct dasd_device *device)
1755 {
1756         struct dasd_eckd_private *private = device->private;
1757         struct dasd_ext_pool_sum eps = private->eps;
1758
1759         if (!eps.flags.extent_size_valid)
1760                 return 0;
1761         if (eps.extent_size.size_1G)
1762                 return 1113;
1763         if (eps.extent_size.size_16M)
1764                 return 21;
1765
1766         return 0;
1767 }
1768
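/* Return the warning threshold configured for the extent pool. */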
1769 static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1770 {
1771         struct dasd_eckd_private *private = device->private;
1772
1773         return private->eps.warn_thrshld;
1774 }
1775
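/* Return whether the extent pool capacity has reached the warning level. */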
1776 static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1777 {
1778         struct dasd_eckd_private *private = device->private;
1779
1780         return private->eps.flags.capacity_at_warnlevel;
1781 }
1782
1783 /*
1784  * Extent Pool out of space
1785  */
1786 static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1787 {
1788         struct dasd_eckd_private *private = device->private;
1789
1790         return private->eps.flags.pool_oos;
1791 }
1792
1793 /*
1794  * Build CP for Perform Subsystem Function - SSC.
1795  */
1796 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1797                                                     int enable_pav)
1798 {
1799         struct dasd_ccw_req *cqr;
1800         struct dasd_psf_ssc_data *psf_ssc_data;
1801         struct ccw1 *ccw;
1802
1803         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
1804                                    sizeof(struct dasd_psf_ssc_data),
1805                                    device, NULL);
1806
1807         if (IS_ERR(cqr)) {
1808                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1809                            "Could not allocate PSF-SSC request");
1810                 return cqr;
1811         }
1812         psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1813         psf_ssc_data->order = PSF_ORDER_SSC;
1814         psf_ssc_data->suborder = 0xc0;
1815         if (enable_pav) {
1816                 psf_ssc_data->suborder |= 0x08;
1817                 psf_ssc_data->reserved[0] = 0x88;
1818         }
1819         ccw = cqr->cpaddr;
1820         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1821         ccw->cda = (__u32)(addr_t)psf_ssc_data;
1822         ccw->count = 66;
1823
1824         cqr->startdev = device;
1825         cqr->memdev = device;
1826         cqr->block = NULL;
1827         cqr->retries = 256;
1828         cqr->expires = 10*HZ;
1829         cqr->buildclk = get_tod_clock();
1830         cqr->status = DASD_CQR_FILLED;
1831         return cqr;
1832 }
1833
1834 /*
1835  * Perform Subsystem Function.
1836  * It is necessary to trigger CIO for channel revalidation since this
1837  * call might change behaviour of DASD devices.
1838  */
1839 static int
1840 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1841                   unsigned long flags)
1842 {
1843         struct dasd_ccw_req *cqr;
1844         int rc;
1845
1846         cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1847         if (IS_ERR(cqr))
1848                 return PTR_ERR(cqr);
1849
1850         /*
1851          * set flags, e.g. turn on failfast, to prevent blocking;
1852          * the calling function should handle failed requests
1853          */
1854         cqr->flags |= flags;
1855
1856         rc = dasd_sleep_on(cqr);
1857         if (!rc)
1858                 /* trigger CIO to reprobe devices */
1859                 css_schedule_reprobe();
1860         else if (cqr->intrc == -EAGAIN)
1861                 rc = -EAGAIN;
1862
1863         dasd_sfree_request(cqr, cqr->memdev);
1864         return rc;
1865 }
1866
1867 /*
1868  * Validate the storage server of the current device.
1869  */
1870 static int dasd_eckd_validate_server(struct dasd_device *device,
1871                                      unsigned long flags)
1872 {
1873         struct dasd_eckd_private *private = device->private;
1874         int enable_pav, rc;
1875
1876         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1877             private->uid.type == UA_HYPER_PAV_ALIAS)
1878                 return 0;
1879         if (dasd_nopav || MACHINE_IS_VM)
1880                 enable_pav = 0;
1881         else
1882                 enable_pav = 1;
1883         rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1884
1885         /* maybe the requested feature is not available on the server,
1886          * therefore just report the error and go ahead */
1887         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1888                         "returned rc=%d", private->uid.ssid, rc);
1889         return rc;
1890 }
1891
1892 /*
1893  * worker to do a validate server in case of a lost pathgroup
1894  */
1895 static void dasd_eckd_do_validate_server(struct work_struct *work)
1896 {
1897         struct dasd_device *device = container_of(work, struct dasd_device,
1898                                                   kick_validate);
1899         unsigned long flags = 0;
1900
1901         set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
1902         if (dasd_eckd_validate_server(device, flags)
1903             == -EAGAIN) {
1904                 /* schedule worker again if failed */
1905                 schedule_work(&device->kick_validate);
1906                 return;
1907         }
1908
1909         dasd_put_device(device);
1910 }
1911
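/*
 * Queue the validate server worker. A device reference is held while the
 * work is pending and dropped again if the work cannot be queued.
 */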
1912 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
1913 {
1914         dasd_get_device(device);
1915         /* exit if device not online or in offline processing */
1916         if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1917            device->state < DASD_STATE_ONLINE) {
1918                 dasd_put_device(device);
1919                 return;
1920         }
1921         /* queue call to do_validate_server to the kernel event daemon. */
1922         if (!schedule_work(&device->kick_validate))
1923                 dasd_put_device(device);
1924 }
1925
1926 /*
1927  * Check device characteristics.
1928  * If the device is accessible using ECKD discipline, the device is enabled.
1929  */
1930 static int
1931 dasd_eckd_check_characteristics(struct dasd_device *device)
1932 {
1933         struct dasd_eckd_private *private = device->private;
1934         struct dasd_block *block;
1935         struct dasd_uid temp_uid;
1936         int rc, i;
1937         int readonly;
1938         unsigned long value;
1939
1940         /* setup work queue for validate server */
1941         INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
1942         /* setup work queue for summary unit check */
1943         INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
1944
1945         if (!ccw_device_is_pathgroup(device->cdev)) {
1946                 dev_warn(&device->cdev->dev,
1947                          "A channel path group could not be established\n");
1948                 return -EIO;
1949         }
1950         if (!ccw_device_is_multipath(device->cdev)) {
1951                 dev_info(&device->cdev->dev,
1952                          "The DASD is not operating in multipath mode\n");
1953         }
1954         if (!private) {
1955                 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
1956                 if (!private) {
1957                         dev_warn(&device->cdev->dev,
1958                                  "Allocating memory for private DASD data "
1959                                  "failed\n");
1960                         return -ENOMEM;
1961                 }
1962                 device->private = private;
1963         } else {
1964                 memset(private, 0, sizeof(*private));
1965         }
1966         /* Invalidate status of initial analysis. */
1967         private->init_cqr_status = -1;
1968         /* Set default cache operations. */
1969         private->attrib.operation = DASD_NORMAL_CACHE;
1970         private->attrib.nr_cyl = 0;
1971
1972         /* Read Configuration Data */
1973         rc = dasd_eckd_read_conf(device);
1974         if (rc)
1975                 goto out_err1;
1976
1977         /* set some default values */
1978         device->default_expires = DASD_EXPIRES;
1979         device->default_retries = DASD_RETRIES;
1980         device->path_thrhld = DASD_ECKD_PATH_THRHLD;
1981         device->path_interval = DASD_ECKD_PATH_INTERVAL;
1982
1983         if (private->gneq) {
1984                 value = 1;
1985                 for (i = 0; i < private->gneq->timeout.value; i++)
1986                         value = 10 * value;
1987                 value = value * private->gneq->timeout.number;
1988                 /* do not accept useless values */
1989                 if (value != 0 && value <= DASD_EXPIRES_MAX)
1990                         device->default_expires = value;
1991         }
1992
1993         dasd_eckd_get_uid(device, &temp_uid);
1994         if (temp_uid.type == UA_BASE_DEVICE) {
1995                 block = dasd_alloc_block();
1996                 if (IS_ERR(block)) {
1997                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1998                                         "could not allocate dasd "
1999                                         "block structure");
2000                         rc = PTR_ERR(block);
2001                         goto out_err1;
2002                 }
2003                 device->block = block;
2004                 block->base = device;
2005         }
2006
2007         /* register lcu with alias handling, enable PAV */
2008         rc = dasd_alias_make_device_known_to_lcu(device);
2009         if (rc)
2010                 goto out_err2;
2011
2012         dasd_eckd_validate_server(device, 0);
2013
2014         /* device may report different configuration data after LCU setup */
2015         rc = dasd_eckd_read_conf(device);
2016         if (rc)
2017                 goto out_err3;
2018
2019         /* Read Feature Codes */
2020         dasd_eckd_read_features(device);
2021
2022         /* Read Volume Information */
2023         dasd_eckd_read_vol_info(device);
2024
2025         /* Read Extent Pool Information */
2026         dasd_eckd_read_ext_pool_info(device);
2027
2028         /* Read Device Characteristics */
2029         rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
2030                                          &private->rdc_data, 64);
2031         if (rc) {
2032                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2033                                 "Read device characteristic failed, rc=%d", rc);
2034                 goto out_err3;
2035         }
2036
2037         if ((device->features & DASD_FEATURE_USERAW) &&
2038             !(private->rdc_data.facilities.RT_in_LR)) {
2039                 dev_err(&device->cdev->dev, "The storage server does not "
2040                         "support raw-track access\n");
2041                 rc = -EINVAL;
2042                 goto out_err3;
2043         }
2044
2045         /* find the valid cylinder size */
2046         if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
2047             private->rdc_data.long_no_cyl)
2048                 private->real_cyl = private->rdc_data.long_no_cyl;
2049         else
2050                 private->real_cyl = private->rdc_data.no_cyl;
2051
2052         private->fcx_max_data = get_fcx_max_data(device);
2053
2054         readonly = dasd_device_is_ro(device);
2055         if (readonly)
2056                 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
2057
2058         dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
2059                  "with %d cylinders, %d heads, %d sectors%s\n",
2060                  private->rdc_data.dev_type,
2061                  private->rdc_data.dev_model,
2062                  private->rdc_data.cu_type,
2063                  private->rdc_data.cu_model.model,
2064                  private->real_cyl,
2065                  private->rdc_data.trk_per_cyl,
2066                  private->rdc_data.sec_per_trk,
2067                  readonly ? ", read-only device" : "");
2068         return 0;
2069
2070 out_err3:
2071         dasd_alias_disconnect_device_from_lcu(device);
2072 out_err2:
2073         dasd_free_block(device->block);
2074         device->block = NULL;
2075 out_err1:
2076         kfree(private->conf_data);
2077         kfree(device->private);
2078         device->private = NULL;
2079         return rc;
2080 }
2081
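/*
 * Release the per-path and per-device configuration data and disconnect the
 * device from its logical control unit.
 */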
2082 static void dasd_eckd_uncheck_device(struct dasd_device *device)
2083 {
2084         struct dasd_eckd_private *private = device->private;
2085         int i;
2086
2087         if (!private)
2088                 return;
2089
2090         dasd_alias_disconnect_device_from_lcu(device);
2091         private->ned = NULL;
2092         private->sneq = NULL;
2093         private->vdsneq = NULL;
2094         private->gneq = NULL;
2095         private->conf_len = 0;
2096         for (i = 0; i < 8; i++) {
2097                 kfree(device->path[i].conf_data);
2098                 if ((__u8 *)device->path[i].conf_data ==
2099                     private->conf_data) {
2100                         private->conf_data = NULL;
2101                         private->conf_len = 0;
2102                 }
2103                 device->path[i].conf_data = NULL;
2104                 device->path[i].cssid = 0;
2105                 device->path[i].ssid = 0;
2106                 device->path[i].chpid = 0;
2107         }
2108         kfree(private->conf_data);
2109         private->conf_data = NULL;
2110 }
2111
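/*
 * Build the channel program for the initial analysis: read the count fields
 * of the first records on tracks 0 and 1 to detect the disk layout.
 */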
2112 static struct dasd_ccw_req *
2113 dasd_eckd_analysis_ccw(struct dasd_device *device)
2114 {
2115         struct dasd_eckd_private *private = device->private;
2116         struct eckd_count *count_data;
2117         struct LO_eckd_data *LO_data;
2118         struct dasd_ccw_req *cqr;
2119         struct ccw1 *ccw;
2120         int cplength, datasize;
2121         int i;
2122
2123         cplength = 8;
2124         datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
2125         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
2126                                    NULL);
2127         if (IS_ERR(cqr))
2128                 return cqr;
2129         ccw = cqr->cpaddr;
2130         /* Define extent for the first 2 tracks. */
2131         define_extent(ccw++, cqr->data, 0, 1,
2132                       DASD_ECKD_CCW_READ_COUNT, device, 0);
2133         LO_data = cqr->data + sizeof(struct DE_eckd_data);
2134         /* Locate record for the first 4 records on track 0. */
2135         ccw[-1].flags |= CCW_FLAG_CC;
2136         locate_record(ccw++, LO_data++, 0, 0, 4,
2137                       DASD_ECKD_CCW_READ_COUNT, device, 0);
2138
2139         count_data = private->count_area;
2140         for (i = 0; i < 4; i++) {
2141                 ccw[-1].flags |= CCW_FLAG_CC;
2142                 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2143                 ccw->flags = 0;
2144                 ccw->count = 8;
2145                 ccw->cda = (__u32)(addr_t) count_data;
2146                 ccw++;
2147                 count_data++;
2148         }
2149
2150         /* Locate record for the first record on track 1. */
2151         ccw[-1].flags |= CCW_FLAG_CC;
2152         locate_record(ccw++, LO_data++, 1, 0, 1,
2153                       DASD_ECKD_CCW_READ_COUNT, device, 0);
2154         /* Read count ccw. */
2155         ccw[-1].flags |= CCW_FLAG_CC;
2156         ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2157         ccw->flags = 0;
2158         ccw->count = 8;
2159         ccw->cda = (__u32)(addr_t) count_data;
2160
2161         cqr->block = NULL;
2162         cqr->startdev = device;
2163         cqr->memdev = device;
2164         cqr->retries = 255;
2165         cqr->buildclk = get_tod_clock();
2166         cqr->status = DASD_CQR_FILLED;
2167         /* Set flags to suppress output for expected errors */
2168         set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2169
2170         return cqr;
2171 }
2172
2173 /* differentiate between 'no record found' and any other error */
2174 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2175 {
2176         char *sense;
2177         if (init_cqr->status == DASD_CQR_DONE)
2178                 return INIT_CQR_OK;
2179         else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2180                  init_cqr->status == DASD_CQR_FAILED) {
2181                 sense = dasd_get_sense(&init_cqr->irb);
2182                 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2183                         return INIT_CQR_UNFORMATTED;
2184                 else
2185                         return INIT_CQR_ERROR;
2186         } else
2187                 return INIT_CQR_ERROR;
2188 }
2189
2190 /*
2191  * This is the callback function for the init_analysis cqr. It saves
2192  * the status of the initial analysis ccw before it frees it and kicks
2193  * the device to continue the startup sequence. This will call
2194  * dasd_eckd_do_analysis again (if the device has not been marked
2195  * for deletion in the meantime).
2196  */
2197 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2198                                         void *data)
2199 {
2200         struct dasd_device *device = init_cqr->startdev;
2201         struct dasd_eckd_private *private = device->private;
2202
2203         private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2204         dasd_sfree_request(init_cqr, device);
2205         dasd_kick_device(device);
2206 }
2207
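/*
 * Queue the initial analysis request. The callback evaluates the result and
 * kicks the device to continue the startup sequence; -EAGAIN is returned
 * because the result is not available yet.
 */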
2208 static int dasd_eckd_start_analysis(struct dasd_block *block)
2209 {
2210         struct dasd_ccw_req *init_cqr;
2211
2212         init_cqr = dasd_eckd_analysis_ccw(block->base);
2213         if (IS_ERR(init_cqr))
2214                 return PTR_ERR(init_cqr);
2215         init_cqr->callback = dasd_eckd_analysis_callback;
2216         init_cqr->callback_data = NULL;
2217         init_cqr->expires = 5*HZ;
2218         /* first try without ERP, so we can later handle unformatted
2219          * devices as a special case
2220          */
2221         clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2222         init_cqr->retries = 0;
2223         dasd_add_request_head(init_cqr);
2224         return -EAGAIN;
2225 }
2226
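/*
 * Evaluate the result of the initial analysis: determine the block size,
 * whether the volume uses the compatible disk layout (CDL) and the total
 * number of blocks.
 */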
2227 static int dasd_eckd_end_analysis(struct dasd_block *block)
2228 {
2229         struct dasd_device *device = block->base;
2230         struct dasd_eckd_private *private = device->private;
2231         struct eckd_count *count_area;
2232         unsigned int sb, blk_per_trk;
2233         int status, i;
2234         struct dasd_ccw_req *init_cqr;
2235
2236         status = private->init_cqr_status;
2237         private->init_cqr_status = -1;
2238         if (status == INIT_CQR_ERROR) {
2239                 /* try again, this time with full ERP */
2240                 init_cqr = dasd_eckd_analysis_ccw(device);
2241                 dasd_sleep_on(init_cqr);
2242                 status = dasd_eckd_analysis_evaluation(init_cqr);
2243                 dasd_sfree_request(init_cqr, device);
2244         }
2245
2246         if (device->features & DASD_FEATURE_USERAW) {
2247                 block->bp_block = DASD_RAW_BLOCKSIZE;
2248                 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2249                 block->s2b_shift = 3;
2250                 goto raw;
2251         }
2252
2253         if (status == INIT_CQR_UNFORMATTED) {
2254                 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2255                 return -EMEDIUMTYPE;
2256         } else if (status == INIT_CQR_ERROR) {
2257                 dev_err(&device->cdev->dev,
2258                         "Detecting the DASD disk layout failed because "
2259                         "of an I/O error\n");
2260                 return -EIO;
2261         }
2262
2263         private->uses_cdl = 1;
2264         /* Check Track 0 for Compatible Disk Layout */
2265         count_area = NULL;
2266         for (i = 0; i < 3; i++) {
2267                 if (private->count_area[i].kl != 4 ||
2268                     private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2269                     private->count_area[i].cyl != 0 ||
2270                     private->count_area[i].head != count_area_head[i] ||
2271                     private->count_area[i].record != count_area_rec[i]) {
2272                         private->uses_cdl = 0;
2273                         break;
2274                 }
2275         }
2276         if (i == 3)
2277                 count_area = &private->count_area[3];
2278
2279         if (private->uses_cdl == 0) {
2280                 for (i = 0; i < 5; i++) {
2281                         if ((private->count_area[i].kl != 0) ||
2282                             (private->count_area[i].dl !=
2283                              private->count_area[0].dl) ||
2284                             private->count_area[i].cyl !=  0 ||
2285                             private->count_area[i].head != count_area_head[i] ||
2286                             private->count_area[i].record != count_area_rec[i])
2287                                 break;
2288                 }
2289                 if (i == 5)
2290                         count_area = &private->count_area[0];
2291         } else {
2292                 if (private->count_area[3].record == 1)
2293                         dev_warn(&device->cdev->dev,
2294                                  "Track 0 has no records following the VTOC\n");
2295         }
2296
2297         if (count_area != NULL && count_area->kl == 0) {
2298                 /* we found nothing violating our disk layout */
2299                 if (dasd_check_blocksize(count_area->dl) == 0)
2300                         block->bp_block = count_area->dl;
2301         }
2302         if (block->bp_block == 0) {
2303                 dev_warn(&device->cdev->dev,
2304                          "The disk layout of the DASD is not supported\n");
2305                 return -EMEDIUMTYPE;
2306         }
2307         block->s2b_shift = 0;   /* bits to shift 512 to get a block */
2308         for (sb = 512; sb < block->bp_block; sb = sb << 1)
2309                 block->s2b_shift++;
2310
2311         blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2312
2313 raw:
2314         block->blocks = ((unsigned long) private->real_cyl *
2315                           private->rdc_data.trk_per_cyl *
2316                           blk_per_trk);
2317
2318         dev_info(&device->cdev->dev,
2319                  "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2320                  "%s\n", (block->bp_block >> 10),
2321                  (((unsigned long) private->real_cyl *
2322                    private->rdc_data.trk_per_cyl *
2323                    blk_per_trk * (block->bp_block >> 9)) >> 1),
2324                  ((blk_per_trk * block->bp_block) >> 10),
2325                  private->uses_cdl ?
2326                  "compatible disk layout" : "linux disk layout");
2327
2328         return 0;
2329 }
2330
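/*
 * Either start the initial analysis or evaluate its result, depending on
 * whether the analysis request has already completed.
 */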
2331 static int dasd_eckd_do_analysis(struct dasd_block *block)
2332 {
2333         struct dasd_eckd_private *private = block->base->private;
2334
2335         if (private->init_cqr_status < 0)
2336                 return dasd_eckd_start_analysis(block);
2337         else
2338                 return dasd_eckd_end_analysis(block);
2339 }
2340
2341 static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2342 {
2343         return dasd_alias_add_device(device);
2344 };
2345
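/*
 * Cancel pending reload and validate server work and drop the device
 * references held for them.
 */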
2346 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2347 {
2348         if (cancel_work_sync(&device->reload_device))
2349                 dasd_put_device(device);
2350         if (cancel_work_sync(&device->kick_validate))
2351                 dasd_put_device(device);
2352
2353         return 0;
2354 };
2355
2356 static int dasd_eckd_basic_to_known(struct dasd_device *device)
2357 {
2358         return dasd_alias_remove_device(device);
2359 };
2360
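/* Fill in the disk geometry (cylinders, heads, sectors) for HDIO_GETGEO. */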
2361 static int
2362 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2363 {
2364         struct dasd_eckd_private *private = block->base->private;
2365
2366         if (dasd_check_blocksize(block->bp_block) == 0) {
2367                 geo->sectors = recs_per_track(&private->rdc_data,
2368                                               0, block->bp_block);
2369         }
2370         geo->cylinders = private->rdc_data.no_cyl;
2371         geo->heads = private->rdc_data.trk_per_cyl;
2372         return 0;
2373 }
2374
2375 /*
2376  * Build the TCW request for the format check
2377  */
2378 static struct dasd_ccw_req *
2379 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2380                           int enable_pav, struct eckd_count *fmt_buffer,
2381                           int rpt)
2382 {
2383         struct dasd_eckd_private *start_priv;
2384         struct dasd_device *startdev = NULL;
2385         struct tidaw *last_tidaw = NULL;
2386         struct dasd_ccw_req *cqr;
2387         struct itcw *itcw;
2388         int itcw_size;
2389         int count;
2390         int rc;
2391         int i;
2392
2393         if (enable_pav)
2394                 startdev = dasd_alias_get_start_dev(base);
2395
2396         if (!startdev)
2397                 startdev = base;
2398
2399         start_priv = startdev->private;
2400
2401         count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2402
2403         /*
2404          * we're adding 'count' tidaws to the itcw.
2405          * calculate the corresponding itcw_size
2406          */
2407         itcw_size = itcw_calc_size(0, count, 0);
2408
2409         cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2410         if (IS_ERR(cqr))
2411                 return cqr;
2412
2413         start_priv->count++;
2414
2415         itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2416         if (IS_ERR(itcw)) {
2417                 rc = -EINVAL;
2418                 goto out_err;
2419         }
2420
2421         cqr->cpaddr = itcw_get_tcw(itcw);
2422         rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2423                           DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2424                           sizeof(struct eckd_count),
2425                           count * sizeof(struct eckd_count), 0, rpt);
2426         if (rc)
2427                 goto out_err;
2428
2429         for (i = 0; i < count; i++) {
2430                 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2431                                             sizeof(struct eckd_count));
2432                 if (IS_ERR(last_tidaw)) {
2433                         rc = -EINVAL;
2434                         goto out_err;
2435                 }
2436         }
2437
2438         last_tidaw->flags |= TIDAW_FLAGS_LAST;
2439         itcw_finalize(itcw);
2440
2441         cqr->cpmode = 1;
2442         cqr->startdev = startdev;
2443         cqr->memdev = startdev;
2444         cqr->basedev = base;
2445         cqr->retries = startdev->default_retries;
2446         cqr->expires = startdev->default_expires * HZ;
2447         cqr->buildclk = get_tod_clock();
2448         cqr->status = DASD_CQR_FILLED;
2449         /* Set flags to suppress output for expected errors */
2450         set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2451         set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2452
2453         return cqr;
2454
2455 out_err:
2456         dasd_sfree_request(cqr, startdev);
2457
2458         return ERR_PTR(rc);
2459 }
2460
2461 /*
2462  * Build the CCW request for the format check
2463  */
2464 static struct dasd_ccw_req *
2465 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2466                       int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2467 {
2468         struct dasd_eckd_private *start_priv;
2469         struct dasd_eckd_private *base_priv;
2470         struct dasd_device *startdev = NULL;
2471         struct dasd_ccw_req *cqr;
2472         struct ccw1 *ccw;
2473         void *data;
2474         int cplength, datasize;
2475         int use_prefix;
2476         int count;
2477         int i;
2478
2479         if (enable_pav)
2480                 startdev = dasd_alias_get_start_dev(base);
2481
2482         if (!startdev)
2483                 startdev = base;
2484
2485         start_priv = startdev->private;
2486         base_priv = base->private;
2487
2488         count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2489
2490         use_prefix = base_priv->features.feature[8] & 0x01;
2491
2492         if (use_prefix) {
2493                 cplength = 1;
2494                 datasize = sizeof(struct PFX_eckd_data);
2495         } else {
2496                 cplength = 2;
2497                 datasize = sizeof(struct DE_eckd_data) +
2498                         sizeof(struct LO_eckd_data);
2499         }
2500         cplength += count;
2501
2502         cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2503         if (IS_ERR(cqr))
2504                 return cqr;
2505
2506         start_priv->count++;
2507         data = cqr->data;
2508         ccw = cqr->cpaddr;
2509
2510         if (use_prefix) {
2511                 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2512                            DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2513                            count, 0, 0);
2514         } else {
2515                 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2516                               DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2517
2518                 data += sizeof(struct DE_eckd_data);
2519                 ccw[-1].flags |= CCW_FLAG_CC;
2520
2521                 locate_record(ccw++, data, fdata->start_unit, 0, count,
2522                               DASD_ECKD_CCW_READ_COUNT, base, 0);
2523         }
2524
2525         for (i = 0; i < count; i++) {
2526                 ccw[-1].flags |= CCW_FLAG_CC;
2527                 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2528                 ccw->flags = CCW_FLAG_SLI;
2529                 ccw->count = 8;
2530                 ccw->cda = (__u32)(addr_t) fmt_buffer;
2531                 ccw++;
2532                 fmt_buffer++;
2533         }
2534
2535         cqr->startdev = startdev;
2536         cqr->memdev = startdev;
2537         cqr->basedev = base;
2538         cqr->retries = DASD_RETRIES;
2539         cqr->expires = startdev->default_expires * HZ;
2540         cqr->buildclk = get_tod_clock();
2541         cqr->status = DASD_CQR_FILLED;
2542         /* Set flags to suppress output for expected errors */
2543         set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2544
2545         return cqr;
2546 }
2547
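/*
 * Build the channel program that formats the requested track range according
 * to the intensity flags and block size given in fdata.
 */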
2548 static struct dasd_ccw_req *
2549 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2550                        struct format_data_t *fdata, int enable_pav)
2551 {
2552         struct dasd_eckd_private *base_priv;
2553         struct dasd_eckd_private *start_priv;
2554         struct dasd_ccw_req *fcp;
2555         struct eckd_count *ect;
2556         struct ch_t address;
2557         struct ccw1 *ccw;
2558         void *data;
2559         int rpt;
2560         int cplength, datasize;
2561         int i, j;
2562         int intensity = 0;
2563         int r0_perm;
2564         int nr_tracks;
2565         int use_prefix;
2566
2567         if (enable_pav)
2568                 startdev = dasd_alias_get_start_dev(base);
2569
2570         if (!startdev)
2571                 startdev = base;
2572
2573         start_priv = startdev->private;
2574         base_priv = base->private;
2575
2576         rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2577
2578         nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2579
2580         /*
2581          * fdata->intensity is a bit string that tells us what to do:
2582          *   Bit 0: write record zero
2583          *   Bit 1: write home address, currently not supported
2584          *   Bit 2: invalidate tracks
2585          *   Bit 3: use OS/390 compatible disk layout (cdl)
2586          *   Bit 4: do not allow storage subsystem to modify record zero
2587          * Only some bit combinations make sense.
2588          */
2589         if (fdata->intensity & 0x10) {
2590                 r0_perm = 0;
2591                 intensity = fdata->intensity & ~0x10;
2592         } else {
2593                 r0_perm = 1;
2594                 intensity = fdata->intensity;
2595         }
2596
2597         use_prefix = base_priv->features.feature[8] & 0x01;
2598
2599         switch (intensity) {
2600         case 0x00:      /* Normal format */
2601         case 0x08:      /* Normal format, use cdl. */
2602                 cplength = 2 + (rpt*nr_tracks);
2603                 if (use_prefix)
2604                         datasize = sizeof(struct PFX_eckd_data) +
2605                                 sizeof(struct LO_eckd_data) +
2606                                 rpt * nr_tracks * sizeof(struct eckd_count);
2607                 else
2608                         datasize = sizeof(struct DE_eckd_data) +
2609                                 sizeof(struct LO_eckd_data) +
2610                                 rpt * nr_tracks * sizeof(struct eckd_count);
2611                 break;
2612         case 0x01:      /* Write record zero and format track. */
2613         case 0x09:      /* Write record zero and format track, use cdl. */
2614                 cplength = 2 + rpt * nr_tracks;
2615                 if (use_prefix)
2616                         datasize = sizeof(struct PFX_eckd_data) +
2617                                 sizeof(struct LO_eckd_data) +
2618                                 sizeof(struct eckd_count) +
2619                                 rpt * nr_tracks * sizeof(struct eckd_count);
2620                 else
2621                         datasize = sizeof(struct DE_eckd_data) +
2622                                 sizeof(struct LO_eckd_data) +
2623                                 sizeof(struct eckd_count) +
2624                                 rpt * nr_tracks * sizeof(struct eckd_count);
2625                 break;
2626         case 0x04:      /* Invalidate track. */
2627         case 0x0c:      /* Invalidate track, use cdl. */
2628                 cplength = 3;
2629                 if (use_prefix)
2630                         datasize = sizeof(struct PFX_eckd_data) +
2631                                 sizeof(struct LO_eckd_data) +
2632                                 sizeof(struct eckd_count);
2633                 else
2634                         datasize = sizeof(struct DE_eckd_data) +
2635                                 sizeof(struct LO_eckd_data) +
2636                                 sizeof(struct eckd_count);
2637                 break;
2638         default:
2639                 dev_warn(&startdev->cdev->dev,
2640                          "An I/O control call used incorrect flags 0x%x\n",
2641                          fdata->intensity);
2642                 return ERR_PTR(-EINVAL);
2643         }
2644
2645         fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2646         if (IS_ERR(fcp))
2647                 return fcp;
2648
2649         start_priv->count++;
2650         data = fcp->data;
2651         ccw = fcp->cpaddr;
2652
2653         switch (intensity & ~0x08) {
2654         case 0x00: /* Normal format. */
2655                 if (use_prefix) {
2656                         prefix(ccw++, (struct PFX_eckd_data *) data,
2657                                fdata->start_unit, fdata->stop_unit,
2658                                DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2659                         /* grant subsystem permission to format R0 */
2660                         if (r0_perm)
2661                                 ((struct PFX_eckd_data *)data)
2662                                         ->define_extent.ga_extended |= 0x04;
2663                         data += sizeof(struct PFX_eckd_data);
2664                 } else {
2665                         define_extent(ccw++, (struct DE_eckd_data *) data,
2666                                       fdata->start_unit, fdata->stop_unit,
2667                                       DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2668                         /* grant subsystem permission to format R0 */
2669                         if (r0_perm)
2670                                 ((struct DE_eckd_data *) data)
2671                                         ->ga_extended |= 0x04;
2672                         data += sizeof(struct DE_eckd_data);
2673                 }
2674                 ccw[-1].flags |= CCW_FLAG_CC;
2675                 locate_record(ccw++, (struct LO_eckd_data *) data,
2676                               fdata->start_unit, 0, rpt*nr_tracks,
2677                               DASD_ECKD_CCW_WRITE_CKD, base,
2678                               fdata->blksize);
2679                 data += sizeof(struct LO_eckd_data);
2680                 break;
2681         case 0x01: /* Write record zero + format track. */
2682                 if (use_prefix) {
2683                         prefix(ccw++, (struct PFX_eckd_data *) data,
2684                                fdata->start_unit, fdata->stop_unit,
2685                                DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2686                                base, startdev);
2687                         data += sizeof(struct PFX_eckd_data);
2688                 } else {
2689                         define_extent(ccw++, (struct DE_eckd_data *) data,
2690                                fdata->start_unit, fdata->stop_unit,
2691                                DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2692                         data += sizeof(struct DE_eckd_data);
2693                 }
2694                 ccw[-1].flags |= CCW_FLAG_CC;
2695                 locate_record(ccw++, (struct LO_eckd_data *) data,
2696                               fdata->start_unit, 0, rpt * nr_tracks + 1,
2697                               DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2698                               base->block->bp_block);
2699                 data += sizeof(struct LO_eckd_data);
2700                 break;
2701         case 0x04: /* Invalidate track. */
2702                 if (use_prefix) {
2703                         prefix(ccw++, (struct PFX_eckd_data *) data,
2704                                fdata->start_unit, fdata->stop_unit,
2705                                DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2706                         data += sizeof(struct PFX_eckd_data);
2707                 } else {
2708                         define_extent(ccw++, (struct DE_eckd_data *) data,
2709                                fdata->start_unit, fdata->stop_unit,
2710                                DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2711                         data += sizeof(struct DE_eckd_data);
2712                 }
2713                 ccw[-1].flags |= CCW_FLAG_CC;
2714                 locate_record(ccw++, (struct LO_eckd_data *) data,
2715                               fdata->start_unit, 0, 1,
2716                               DASD_ECKD_CCW_WRITE_CKD, base, 8);
2717                 data += sizeof(struct LO_eckd_data);
2718                 break;
2719         }
2720
2721         for (j = 0; j < nr_tracks; j++) {
2722                 /* calculate cylinder and head for the current track */
2723                 set_ch_t(&address,
2724                          (fdata->start_unit + j) /
2725                          base_priv->rdc_data.trk_per_cyl,
2726                          (fdata->start_unit + j) %
2727                          base_priv->rdc_data.trk_per_cyl);
2728                 if (intensity & 0x01) { /* write record zero */
2729                         ect = (struct eckd_count *) data;
2730                         data += sizeof(struct eckd_count);
2731                         ect->cyl = address.cyl;
2732                         ect->head = address.head;
2733                         ect->record = 0;
2734                         ect->kl = 0;
2735                         ect->dl = 8;
2736                         ccw[-1].flags |= CCW_FLAG_CC;
2737                         ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2738                         ccw->flags = CCW_FLAG_SLI;
2739                         ccw->count = 8;
2740                         ccw->cda = (__u32)(addr_t) ect;
2741                         ccw++;
2742                 }
2743                 if ((intensity & ~0x08) & 0x04) {       /* erase track */
2744                         ect = (struct eckd_count *) data;
2745                         data += sizeof(struct eckd_count);
2746                         ect->cyl = address.cyl;
2747                         ect->head = address.head;
2748                         ect->record = 1;
2749                         ect->kl = 0;
2750                         ect->dl = 0;
2751                         ccw[-1].flags |= CCW_FLAG_CC;
2752                         ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2753                         ccw->flags = CCW_FLAG_SLI;
2754                         ccw->count = 8;
2755                         ccw->cda = (__u32)(addr_t) ect;
2756                 } else {                /* write remaining records */
2757                         for (i = 0; i < rpt; i++) {
2758                                 ect = (struct eckd_count *) data;
2759                                 data += sizeof(struct eckd_count);
2760                                 ect->cyl = address.cyl;
2761                                 ect->head = address.head;
2762                                 ect->record = i + 1;
2763                                 ect->kl = 0;
2764                                 ect->dl = fdata->blksize;
2765                                 /*
2766                                  * Check for special tracks 0-1
2767                                  * when formatting CDL
2768                                  */
2769                                 if ((intensity & 0x08) &&
2770                                     address.cyl == 0 && address.head == 0) {
2771                                         if (i < 3) {
2772                                                 ect->kl = 4;
2773                                                 ect->dl = sizes_trk0[i] - 4;
2774                                         }
2775                                 }
2776                                 if ((intensity & 0x08) &&
2777                                     address.cyl == 0 && address.head == 1) {
2778                                         ect->kl = 44;
2779                                         ect->dl = LABEL_SIZE - 44;
2780                                 }
2781                                 ccw[-1].flags |= CCW_FLAG_CC;
2782                                 if (i != 0 || j == 0)
2783                                         ccw->cmd_code =
2784                                                 DASD_ECKD_CCW_WRITE_CKD;
2785                                 else
2786                                         ccw->cmd_code =
2787                                                 DASD_ECKD_CCW_WRITE_CKD_MT;
2788                                 ccw->flags = CCW_FLAG_SLI;
2789                                 ccw->count = 8;
2790                                 ccw->cda = (__u32)(addr_t) ect;
2791                                 ccw++;
2792                         }
2793                 }
2794         }
2795
2796         fcp->startdev = startdev;
2797         fcp->memdev = startdev;
2798         fcp->basedev = base;
2799         fcp->retries = 256;
2800         fcp->expires = startdev->default_expires * HZ;
2801         fcp->buildclk = get_tod_clock();
2802         fcp->status = DASD_CQR_FILLED;
2803
2804         return fcp;
2805 }
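
/*
 * Illustrative sketch (not part of the driver logic): for a plain format of a
 * single track with intensity 0x00 on a device supporting the prefix command,
 * and assuming 4096-byte blocks with 12 records per track on a 3390, the CCW
 * program built above is a PFX CCW, a locate record CCW covering 12 records,
 * and one WRITE CKD CCW per record with dl = 4096.  With intensity 0x01 a
 * WRITE RECORD ZERO CCW (dl = 8) is chained in ahead of the data records of
 * every track; with intensity 0x08 (CDL) records 1-3 of track 0 and all
 * records of track 1 get the special key and data lengths set up in the loop
 * above.
 */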
2806
2807 /*
2808  * Wrapper function to build a CCW request depending on input data
2809  */
2810 static struct dasd_ccw_req *
2811 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2812                                struct format_data_t *fdata, int enable_pav,
2813                                int tpm, struct eckd_count *fmt_buffer, int rpt)
2814 {
2815         struct dasd_ccw_req *ccw_req;
2816
2817         if (!fmt_buffer) {
2818                 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2819         } else {
2820                 if (tpm)
2821                         ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2822                                                             enable_pav,
2823                                                             fmt_buffer, rpt);
2824                 else
2825                         ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2826                                                         fmt_buffer, rpt);
2827         }
2828
2829         return ccw_req;
2830 }
2831
2832 /*
2833  * Sanity checks on format_data
2834  */
2835 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2836                                           struct format_data_t *fdata)
2837 {
2838         struct dasd_eckd_private *private = base->private;
2839
2840         if (fdata->start_unit >=
2841             (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2842                 dev_warn(&base->cdev->dev,
2843                          "Start track number %u used in formatting is too big\n",
2844                          fdata->start_unit);
2845                 return -EINVAL;
2846         }
2847         if (fdata->stop_unit >=
2848             (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2849                 dev_warn(&base->cdev->dev,
2850                          "Stop track number %u used in formatting is too big\n",
2851                          fdata->stop_unit);
2852                 return -EINVAL;
2853         }
2854         if (fdata->start_unit > fdata->stop_unit) {
2855                 dev_warn(&base->cdev->dev,
2856                          "Start track %u used in formatting exceeds end track\n",
2857                          fdata->start_unit);
2858                 return -EINVAL;
2859         }
2860         if (dasd_check_blocksize(fdata->blksize) != 0) {
2861                 dev_warn(&base->cdev->dev,
2862                          "The DASD cannot be formatted with block size %u\n",
2863                          fdata->blksize);
2864                 return -EINVAL;
2865         }
2866         return 0;
2867 }
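
/*
 * Bounds example (for illustration only; the numbers depend on the device):
 * a 3390 model 3 reports 3339 cylinders with 15 tracks per cylinder, so
 * real_cyl * trk_per_cyl = 50085 and valid track numbers for formatting are
 * 0 .. 50084.  A request with start_unit or stop_unit >= 50085, or with
 * start_unit > stop_unit, is rejected above with -EINVAL.
 */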
2868
2869 /*
2870  * This function will process format_data originally coming from an IOCTL
2871  */
2872 static int dasd_eckd_format_process_data(struct dasd_device *base,
2873                                          struct format_data_t *fdata,
2874                                          int enable_pav, int tpm,
2875                                          struct eckd_count *fmt_buffer, int rpt,
2876                                          struct irb *irb)
2877 {
2878         struct dasd_eckd_private *private = base->private;
2879         struct dasd_ccw_req *cqr, *n;
2880         struct list_head format_queue;
2881         struct dasd_device *device;
2882         char *sense = NULL;
2883         int old_start, old_stop, format_step;
2884         int step, retry;
2885         int rc;
2886
2887         rc = dasd_eckd_format_sanity_checks(base, fdata);
2888         if (rc)
2889                 return rc;
2890
2891         INIT_LIST_HEAD(&format_queue);
2892
2893         old_start = fdata->start_unit;
2894         old_stop = fdata->stop_unit;
2895
2896         if (!tpm && fmt_buffer != NULL) {
2897                 /* Command Mode / Format Check */
2898                 format_step = 1;
2899         } else if (tpm && fmt_buffer != NULL) {
2900                 /* Transport Mode / Format Check */
2901                 format_step = DASD_CQR_MAX_CCW / rpt;
2902         } else {
2903                 /* Normal Formatting */
2904                 format_step = DASD_CQR_MAX_CCW /
2905                         recs_per_track(&private->rdc_data, 0, fdata->blksize);
2906         }
2907
2908         do {
2909                 retry = 0;
2910                 while (fdata->start_unit <= old_stop) {
2911                         step = fdata->stop_unit - fdata->start_unit + 1;
2912                         if (step > format_step) {
2913                                 fdata->stop_unit =
2914                                         fdata->start_unit + format_step - 1;
2915                         }
2916
2917                         cqr = dasd_eckd_format_build_ccw_req(base, fdata,
2918                                                              enable_pav, tpm,
2919                                                              fmt_buffer, rpt);
2920                         if (IS_ERR(cqr)) {
2921                                 rc = PTR_ERR(cqr);
2922                                 if (rc == -ENOMEM) {
2923                                         if (list_empty(&format_queue))
2924                                                 goto out;
2925                                         /*
2926                                          * not enough memory available; submit the
2927                                          * requests built so far and retry the rest
2928                                          * once those have finished
2929                                          */
2930                                         retry = 1;
2931                                         break;
2932                                 }
2933                                 goto out_err;
2934                         }
2935                         list_add_tail(&cqr->blocklist, &format_queue);
2936
2937                         if (fmt_buffer) {
2938                                 step = fdata->stop_unit - fdata->start_unit + 1;
2939                                 fmt_buffer += rpt * step;
2940                         }
2941                         fdata->start_unit = fdata->stop_unit + 1;
2942                         fdata->stop_unit = old_stop;
2943                 }
2944
2945                 rc = dasd_sleep_on_queue(&format_queue);
2946
2947 out_err:
2948                 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
2949                         device = cqr->startdev;
2950                         private = device->private;
2951
2952                         if (cqr->status == DASD_CQR_FAILED) {
2953                                 /*
2954                                  * Only get sense data if called by format
2955                                  * check
2956                                  */
2957                                 if (fmt_buffer && irb) {
2958                                         sense = dasd_get_sense(&cqr->irb);
2959                                         memcpy(irb, &cqr->irb, sizeof(*irb));
2960                                 }
2961                                 rc = -EIO;
2962                         }
2963                         list_del_init(&cqr->blocklist);
2964                         dasd_ffree_request(cqr, device);
2965                         private->count--;
2966                 }
2967
2968                 if (rc && rc != -EIO)
2969                         goto out;
2970                 if (rc == -EIO) {
2971                         /*
2972                          * In case fewer than the expected records are on the
2973                          * track, we will most likely get a 'No Record Found'
2974                          * error (in command mode) or a 'File Protected' error
2975                          * (in transport mode). Those particular cases should not
2976                          * be passed on as -EIO to the IOCTL; reset rc and
2977                          * continue instead.
2978                          */
2979                         if (sense &&
2980                             (sense[1] & SNS1_NO_REC_FOUND ||
2981                              sense[1] & SNS1_FILE_PROTECTED))
2982                                 retry = 1;
2983                         else
2984                                 goto out;
2985                 }
2986
2987         } while (retry);
2988
2989 out:
2990         fdata->start_unit = old_start;
2991         fdata->stop_unit = old_stop;
2992
2993         return rc;
2994 }
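
/*
 * Worked example for the format_step calculation above (a sketch; the numbers
 * assume DASD_CQR_MAX_CCW is 255 and a 3390 geometry): formatting with a
 * 4096-byte block size gives 12 records per track, so each normal format
 * request covers 255 / 12 = 21 tracks.  A command mode format check processes
 * one track per request, while a transport mode check packs 255 / rpt tracks
 * into each request.
 */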
2995
2996 static int dasd_eckd_format_device(struct dasd_device *base,
2997                                    struct format_data_t *fdata, int enable_pav)
2998 {
2999         return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
3000                                              0, NULL);
3001 }
3002
3003 /*
3004  * Callback function to free ESE format requests.
3005  */
3006 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3007 {
3008         struct dasd_device *device = cqr->startdev;
3009         struct dasd_eckd_private *private = device->private;
3010
3011         private->count--;
3012         dasd_ffree_request(cqr, device);
3013 }
3014
3015 static struct dasd_ccw_req *
3016 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
3017 {
3018         struct dasd_eckd_private *private;
3019         struct format_data_t fdata;
3020         unsigned int recs_per_trk;
3021         struct dasd_ccw_req *fcqr;
3022         struct dasd_device *base;
3023         struct dasd_block *block;
3024         unsigned int blksize;
3025         struct request *req;
3026         sector_t first_trk;
3027         sector_t last_trk;
3028         int rc;
3029
3030         req = cqr->callback_data;
3031         base = cqr->block->base;
3032         private = base->private;
3033         block = base->block;
3034         blksize = block->bp_block;
3035         recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3036
3037         first_trk = blk_rq_pos(req) >> block->s2b_shift;
3038         sector_div(first_trk, recs_per_trk);
3039         last_trk =
3040                 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3041         sector_div(last_trk, recs_per_trk);
3042
3043         fdata.start_unit = first_trk;
3044         fdata.stop_unit = last_trk;
3045         fdata.blksize = blksize;
3046         fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3047
3048         rc = dasd_eckd_format_sanity_checks(base, &fdata);
3049         if (rc)
3050                 return ERR_PTR(-EINVAL);
3051
3052         /*
3053          * We're building the request with PAV disabled as we're reusing
3054          * the former startdev.
3055          */
3056         fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3057         if (IS_ERR(fcqr))
3058                 return fcqr;
3059
3060         fcqr->callback = dasd_eckd_ese_format_cb;
3061
3062         return fcqr;
3063 }
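
/*
 * Track calculation example for the ESE format above (illustrative, assuming
 * 4096-byte blocks on a 3390, i.e. s2b_shift = 3 and 12 records per track):
 * a request starting at 512-byte sector 960 maps to block 960 >> 3 = 120 and
 * thus to track 120 / 12 = 10, which becomes fdata.start_unit.
 */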
3064
3065 /*
3066  * When data is read from an unformatted area of an ESE volume, this function
3067  * returns zeroed data and thereby mimics a read of zero data.
3068  */
3069 static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
3070 {
3071         unsigned int blksize, off;
3072         struct dasd_device *base;
3073         struct req_iterator iter;
3074         struct request *req;
3075         struct bio_vec bv;
3076         char *dst;
3077
3078         req = (struct request *) cqr->callback_data;
3079         base = cqr->block->base;
3080         blksize = base->block->bp_block;
3081
3082         rq_for_each_segment(bv, req, iter) {
3083                 dst = page_address(bv.bv_page) + bv.bv_offset;
3084                 for (off = 0; off < bv.bv_len; off += blksize) {
3085                         if (dst && rq_data_dir(req) == READ) {
3086                                 /* zero this block without advancing dst itself */
3087                                 memset(dst + off, 0, blksize);
3088                         }
3089                 }
3090         }
3091 }
3092
3093 /*
3094  * Helper function to count consecutive records of a single track.
3095  */
3096 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3097                                    int max)
3098 {
3099         int head;
3100         int i;
3101
3102         head = fmt_buffer[start].head;
3103
3104         /*
3105          * There are 3 conditions where we stop counting:
3106          * - if the data wraps around (record 1 of the same head shows up
3107          *   again), which may happen due to the way DASD_ECKD_CCW_READ_COUNT works
3108          * - when the head changes, because we're iterating over several tracks
3109          *   then (DASD_ECKD_CCW_READ_COUNT_MT)
3110          * - when we've reached the end of sensible data in the buffer (the
3111          *   record will be 0 then)
3112          */
3113         for (i = start; i < max; i++) {
3114                 if (i > start) {
3115                         if ((fmt_buffer[i].head == head &&
3116                             fmt_buffer[i].record == 1) ||
3117                             fmt_buffer[i].head != head ||
3118                             fmt_buffer[i].record == 0)
3119                                 break;
3120                 }
3121         }
3122
3123         return i - start;
3124 }
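
/*
 * Example: with fmt_buffer containing the count fields
 *   { head 0 / rec 1, head 0 / rec 2, head 0 / rec 3, head 0 / rec 1, ... }
 * a call with start = 0 stops at index 3 because record 1 of the same head
 * reoccurs, and returns 3.  A head change or a record number of 0 terminates
 * the count in the same way.
 */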
3125
3126 /*
3127  * Evaluate a given range of tracks. Data like number of records, blocksize,
3128  * record ids, and key length are compared with expected data.
3129  *
3130  * If a mismatch occurs, the corresponding error bit is set, as well as
3131  * additional information, depending on the error.
3132  */
3133 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
3134                                              struct format_check_t *cdata,
3135                                              int rpt_max, int rpt_exp,
3136                                              int trk_per_cyl, int tpm)
3137 {
3138         struct ch_t geo;
3139         int max_entries;
3140         int count = 0;
3141         int trkcount;
3142         int blksize;
3143         int pos = 0;
3144         int i, j;
3145         int kl;
3146
3147         trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3148         max_entries = trkcount * rpt_max;
3149
3150         for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
3151                 /* Calculate the correct next starting position in the buffer */
3152                 if (tpm) {
3153                         while (fmt_buffer[pos].record == 0 &&
3154                                fmt_buffer[pos].dl == 0) {
3155                                 if (pos++ > max_entries)
3156                                         break;
3157                         }
3158                 } else {
3159                         if (i != cdata->expect.start_unit)
3160                                 pos += rpt_max - count;
3161                 }
3162
3163                 /* Calculate the expected geo values for the current track */
3164                 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
3165
3166                 /* Count and check number of records */
3167                 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
3168
3169                 if (count < rpt_exp) {
3170                         cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
3171                         break;
3172                 }
3173                 if (count > rpt_exp) {
3174                         cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
3175                         break;
3176                 }
3177
3178                 for (j = 0; j < count; j++, pos++) {
3179                         blksize = cdata->expect.blksize;
3180                         kl = 0;
3181
3182                         /*
3183                          * Set special values when checking CDL formatted
3184                          * devices.
3185                          */
3186                         if ((cdata->expect.intensity & 0x08) &&
3187                             geo.cyl == 0 && geo.head == 0) {
3188                                 if (j < 3) {
3189                                         blksize = sizes_trk0[j] - 4;
3190                                         kl = 4;
3191                                 }
3192                         }
3193                         if ((cdata->expect.intensity & 0x08) &&
3194                             geo.cyl == 0 && geo.head == 1) {
3195                                 blksize = LABEL_SIZE - 44;
3196                                 kl = 44;
3197                         }
3198
3199                         /* Check blocksize */
3200                         if (fmt_buffer[pos].dl != blksize) {
3201                                 cdata->result = DASD_FMT_ERR_BLKSIZE;
3202                                 goto out;
3203                         }
3204                         /* Check if key length is 0 */
3205                         if (fmt_buffer[pos].kl != kl) {
3206                                 cdata->result = DASD_FMT_ERR_KEY_LENGTH;
3207                                 goto out;
3208                         }
3209                         /* Check if record_id is correct */
3210                         if (fmt_buffer[pos].cyl != geo.cyl ||
3211                             fmt_buffer[pos].head != geo.head ||
3212                             fmt_buffer[pos].record != (j + 1)) {
3213                                 cdata->result = DASD_FMT_ERR_RECORD_ID;
3214                                 goto out;
3215                         }
3216                 }
3217         }
3218
3219 out:
3220         /*
3221          * In case of no errors, we need to decrease by one
3222          * to get the correct positions.
3223          */
3224         if (!cdata->result) {
3225                 i--;
3226                 pos--;
3227         }
3228
3229         cdata->unit = i;
3230         cdata->num_records = count;
3231         cdata->rec = fmt_buffer[pos].record;
3232         cdata->blksize = fmt_buffer[pos].dl;
3233         cdata->key_length = fmt_buffer[pos].kl;
3234 }
3235
3236 /*
3237  * Check the format of a range of tracks of a DASD.
3238  */
3239 static int dasd_eckd_check_device_format(struct dasd_device *base,
3240                                          struct format_check_t *cdata,
3241                                          int enable_pav)
3242 {
3243         struct dasd_eckd_private *private = base->private;
3244         struct eckd_count *fmt_buffer;
3245         struct irb irb;
3246         int rpt_max, rpt_exp;
3247         int fmt_buffer_size;
3248         int trk_per_cyl;
3249         int trkcount;
3250         int tpm = 0;
3251         int rc;
3252
3253         trk_per_cyl = private->rdc_data.trk_per_cyl;
3254
3255         /* Get maximum and expected amount of records per track */
3256         rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3257         rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3258
3259         trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3260         fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3261
3262         fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3263         if (!fmt_buffer)
3264                 return -ENOMEM;
3265
3266         /*
3267          * A certain FICON feature subset is needed to operate in transport
3268          * mode. Additionally, the support for transport mode is implicitly
3269          * checked by comparing the buffer size with fcx_max_data. As long as
3270          * the buffer size is smaller we can operate in transport mode and
3271                  * the buffer size is smaller, we can operate in transport mode and
3272                  * process multiple tracks. If not, only one track at a time is
3273                  * processed using command mode.
3274         if ((private->features.feature[40] & 0x04) &&
3275             fmt_buffer_size <= private->fcx_max_data)
3276                 tpm = 1;
3277
3278         rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3279                                            tpm, fmt_buffer, rpt_max, &irb);
3280         if (rc && rc != -EIO)
3281                 goto out;
3282         if (rc == -EIO) {
3283                 /*
3284                  * If our first attempt with transport mode enabled comes back
3285                  * with an incorrect length error, we're going to retry the
3286                  * check with command mode.
3287                  */
3288                 if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3289                         tpm = 0;
3290                         rc = dasd_eckd_format_process_data(base, &cdata->expect,
3291                                                            enable_pav, tpm,
3292                                                            fmt_buffer, rpt_max,
3293                                                            &irb);
3294                         if (rc)
3295                                 goto out;
3296                 } else {
3297                         goto out;
3298                 }
3299         }
3300
3301         dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3302                                          trk_per_cyl, tpm);
3303
3304 out:
3305         kfree(fmt_buffer);
3306
3307         return rc;
3308 }
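
/*
 * Sizing example for the format check above (illustrative; assumes a 3390
 * where 512-byte records give 49 records per track, so rpt_max = 50, and an
 * 8-byte struct eckd_count): checking a range of 30 tracks needs
 * 30 * 50 * 8 = 12000 bytes of fmt_buffer.  Transport mode is then used only
 * if the FICON feature bit is set and fcx_max_data is at least that large.
 */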
3309
3310 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3311 {
3312         if (cqr->retries < 0) {
3313                 cqr->status = DASD_CQR_FAILED;
3314                 return;
3315         }
3316         cqr->status = DASD_CQR_FILLED;
3317         if (cqr->block && (cqr->startdev != cqr->block->base)) {
3318                 dasd_eckd_reset_ccw_to_base_io(cqr);
3319                 cqr->startdev = cqr->block->base;
3320                 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3321         }
3322 };
3323
3324 static dasd_erp_fn_t
3325 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3326 {
3327         struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3328         struct ccw_device *cdev = device->cdev;
3329
3330         switch (cdev->id.cu_type) {
3331         case 0x3990:
3332         case 0x2105:
3333         case 0x2107:
3334         case 0x1750:
3335                 return dasd_3990_erp_action;
3336         case 0x9343:
3337         case 0x3880:
3338         default:
3339                 return dasd_default_erp_action;
3340         }
3341 }
3342
3343 static dasd_erp_fn_t
3344 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3345 {
3346         return dasd_default_erp_postaction;
3347 }
3348
3349 static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3350                                               struct dasd_ccw_req *cqr,
3351                                               struct irb *irb)
3352 {
3353         char mask;
3354         char *sense = NULL;
3355         struct dasd_eckd_private *private = device->private;
3356
3357         /* first of all check for state change pending interrupt */
3358         mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3359         if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3360                 /*
3361                  * for alias only, not in offline processing
3362                  * and only if not suspended
3363                  */
3364                 if (!device->block && private->lcu &&
3365                     device->state == DASD_STATE_ONLINE &&
3366                     !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3367                     !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3368                         /* schedule worker to reload device */
3369                         dasd_reload_device(device);
3370                 }
3371                 dasd_generic_handle_state_change(device);
3372                 return;
3373         }
3374
3375         sense = dasd_get_sense(irb);
3376         if (!sense)
3377                 return;
3378
3379         /* summary unit check */
3380         if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3381             (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3382                 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3383                         DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3384                                       "eckd suc: device already notified");
3385                         return;
3386                 }
3387                 sense = dasd_get_sense(irb);
3388                 if (!sense) {
3389                         DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3390                                       "eckd suc: no reason code available");
3391                         clear_bit(DASD_FLAG_SUC, &device->flags);
3392                         return;
3393
3394                 }
3395                 private->suc_reason = sense[8];
3396                 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3397                               "eckd handle summary unit check: reason",
3398                               private->suc_reason);
3399                 dasd_get_device(device);
3400                 if (!schedule_work(&device->suc_work))
3401                         dasd_put_device(device);
3402
3403                 return;
3404         }
3405
3406         /* service information message SIM */
3407         if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3408             ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3409                 dasd_3990_erp_handle_sim(device, sense);
3410                 return;
3411         }
3412
3413         /* loss of device reservation is handled via base devices only
3414          * as alias devices may be used with several bases
3415          */
3416         if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3417             (sense[7] == 0x3F) &&
3418             (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3419             test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3420                 if (device->features & DASD_FEATURE_FAILONSLCK)
3421                         set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3422                 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3423                 dev_err(&device->cdev->dev,
3424                         "The device reservation was lost\n");
3425         }
3426 }
3427
3428 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3429                                        unsigned int first_trk,
3430                                        unsigned int last_trk)
3431 {
3432         struct dasd_eckd_private *private = device->private;
3433         unsigned int trks_per_vol;
3434         int rc = 0;
3435
3436         trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3437
3438         if (first_trk >= trks_per_vol) {
3439                 dev_warn(&device->cdev->dev,
3440                          "Start track number %u used in the space release command is too big\n",
3441                          first_trk);
3442                 rc = -EINVAL;
3443         } else if (last_trk >= trks_per_vol) {
3444                 dev_warn(&device->cdev->dev,
3445                          "Stop track number %u used in the space release command is too big\n",
3446                          last_trk);
3447                 rc = -EINVAL;
3448         } else if (first_trk > last_trk) {
3449                 dev_warn(&device->cdev->dev,
3450                          "Start track %u used in the space release command exceeds the end track\n",
3451                          first_trk);
3452                 rc = -EINVAL;
3453         }
3454         return rc;
3455 }
3456
3457 /*
3458  * Helper function to count the number of extents involved in a given track
3459  * range, taking extent alignment into account.
3460  */
3461 static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
3462 {
3463         int cur_pos = 0;
3464         int count = 0;
3465         int tmp;
3466
3467         if (from == to)
3468                 return 1;
3469
3470         /* Count first partial extent */
3471         if (from % trks_per_ext != 0) {
3472                 tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
3473                 if (tmp > to)
3474                         tmp = to;
3475                 cur_pos = tmp - from + 1;
3476                 count++;
3477         }
3478         /* Count full extents */
3479         if (to - (from + cur_pos) + 1 >= trks_per_ext) {
3480                 tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
3481                 count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
3482                 cur_pos = tmp;
3483         }
3484         /* Count last partial extent */
3485         if (cur_pos < to)
3486                 count++;
3487
3488         return count;
3489 }
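
/*
 * Worked example (a sketch; the extent size is device dependent, 21 cylinders
 * = 315 tracks is assumed here): with trks_per_ext = 315 the extents start at
 * tracks 0, 315, 630, ...  For from = 100 and to = 700 the first partial
 * extent covers tracks 100-314, one full extent covers 315-629, and a last
 * partial extent covers 630-700, so count_exts() returns 3.
 */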
3490
3491 /*
3492  * Release allocated space for a given range or an entire volume.
3493  */
3494 static struct dasd_ccw_req *
3495 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3496                   struct request *req, unsigned int first_trk,
3497                   unsigned int last_trk, int by_extent)
3498 {
3499         struct dasd_eckd_private *private = device->private;
3500         struct dasd_dso_ras_ext_range *ras_range;
3501         struct dasd_rssd_features *features;
3502         struct dasd_dso_ras_data *ras_data;
3503         u16 heads, beg_head, end_head;
3504         int cur_to_trk, cur_from_trk;
3505         struct dasd_ccw_req *cqr;
3506         u32 beg_cyl, end_cyl;
3507         struct ccw1 *ccw;
3508         int trks_per_ext;
3509         size_t ras_size;
3510         size_t size;
3511         int nr_exts;
3512         void *rq;
3513         int i;
3514
3515         if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
3516                 return ERR_PTR(-EINVAL);
3517
3518         rq = req ? blk_mq_rq_to_pdu(req) : NULL;
3519
3520         features = &private->features;
3521
3522         trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3523         nr_exts = 0;
3524         if (by_extent)
3525                 nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
3526         ras_size = sizeof(*ras_data);
3527         size = ras_size + (nr_exts * sizeof(*ras_range));
3528
3529         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3530         if (IS_ERR(cqr)) {
3531                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3532                                 "Could not allocate RAS request");
3533                 return cqr;
3534         }
3535
3536         ras_data = cqr->data;
3537         memset(ras_data, 0, size);
3538
3539         ras_data->order = DSO_ORDER_RAS;
3540         ras_data->flags.vol_type = 0; /* CKD volume */
3541         /* Release specified extents or entire volume */
3542         ras_data->op_flags.by_extent = by_extent;
3543         /*
3544          * This bit guarantees initialisation of tracks within an extent that is
3545          * not fully specified, but is only supported with a certain feature
3546          * subset.
3547          */
3548         ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
3549         ras_data->lss = private->ned->ID;
3550         ras_data->dev_addr = private->ned->unit_addr;
3551         ras_data->nr_exts = nr_exts;
3552
3553         if (by_extent) {
3554                 heads = private->rdc_data.trk_per_cyl;
3555                 cur_from_trk = first_trk;
3556                 cur_to_trk = first_trk + trks_per_ext -
3557                         (first_trk % trks_per_ext) - 1;
3558                 if (cur_to_trk > last_trk)
3559                         cur_to_trk = last_trk;
3560                 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3561
3562                 for (i = 0; i < nr_exts; i++) {
3563                         beg_cyl = cur_from_trk / heads;
3564                         beg_head = cur_from_trk % heads;
3565                         end_cyl = cur_to_trk / heads;
3566                         end_head = cur_to_trk % heads;
3567
3568                         set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
3569                         set_ch_t(&ras_range->end_ext, end_cyl, end_head);
3570
3571                         cur_from_trk = cur_to_trk + 1;
3572                         cur_to_trk = cur_from_trk + trks_per_ext - 1;
3573                         if (cur_to_trk > last_trk)
3574                                 cur_to_trk = last_trk;
3575                         ras_range++;
3576                 }
3577         }
3578
3579         ccw = cqr->cpaddr;
3580         ccw->cda = (__u32)(addr_t)cqr->data;
3581         ccw->cmd_code = DASD_ECKD_CCW_DSO;
3582         ccw->count = size;
3583
3584         cqr->startdev = device;
3585         cqr->memdev = device;
3586         cqr->block = block;
3587         cqr->retries = 256;
3588         cqr->expires = device->default_expires * HZ;
3589         cqr->buildclk = get_tod_clock();
3590         cqr->status = DASD_CQR_FILLED;
3591
3592         return cqr;
3593 }
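
/*
 * Range setup example for the request above (illustrative, assuming 15 tracks
 * per cylinder and 315 tracks per extent): releasing tracks 100-700 by extent
 * produces three ranges; the second one runs from track 315 (cylinder 21,
 * head 0) to track 629 (cylinder 41, head 14), which is what set_ch_t()
 * stores in beg_ext and end_ext.
 */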
3594
3595 static int dasd_eckd_release_space_full(struct dasd_device *device)
3596 {
3597         struct dasd_ccw_req *cqr;
3598         int rc;
3599
3600         cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3601         if (IS_ERR(cqr))
3602                 return PTR_ERR(cqr);
3603
3604         rc = dasd_sleep_on_interruptible(cqr);
3605
3606         dasd_sfree_request(cqr, cqr->memdev);
3607
3608         return rc;
3609 }
3610
3611 static int dasd_eckd_release_space_trks(struct dasd_device *device,
3612                                         unsigned int from, unsigned int to)
3613 {
3614         struct dasd_eckd_private *private = device->private;
3615         struct dasd_block *block = device->block;
3616         struct dasd_ccw_req *cqr, *n;
3617         struct list_head ras_queue;
3618         unsigned int device_exts;
3619         int trks_per_ext;
3620         int stop, step;
3621         int cur_pos;
3622         int rc = 0;
3623         int retry;
3624
3625         INIT_LIST_HEAD(&ras_queue);
3626
3627         device_exts = private->real_cyl / dasd_eckd_ext_size(device);
3628         trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3629
3630         /* Make sure device limits are not exceeded */
3631         step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
3632         cur_pos = from;
3633
3634         do {
3635                 retry = 0;
3636                 while (cur_pos < to) {
3637                         stop = cur_pos + step -
3638                                 ((cur_pos + step) % trks_per_ext) - 1;
3639                         if (stop > to)
3640                                 stop = to;
3641
3642                         cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3643                         if (IS_ERR(cqr)) {
3644                                 rc = PTR_ERR(cqr);
3645                                 if (rc == -ENOMEM) {
3646                                         if (list_empty(&ras_queue))
3647                                                 goto out;
3648                                         retry = 1;
3649                                         break;
3650                                 }
3651                                 goto err_out;
3652                         }
3653
3654                         spin_lock_irq(&block->queue_lock);
3655                         list_add_tail(&cqr->blocklist, &ras_queue);
3656                         spin_unlock_irq(&block->queue_lock);
3657                         cur_pos = stop + 1;
3658                 }
3659
3660                 rc = dasd_sleep_on_queue_interruptible(&ras_queue);
3661
3662 err_out:
3663                 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3664                         device = cqr->startdev;
3665                         private = device->private;
3666
3667                         spin_lock_irq(&block->queue_lock);
3668                         list_del_init(&cqr->blocklist);
3669                         spin_unlock_irq(&block->queue_lock);
3670                         dasd_sfree_request(cqr, device);
3671                         private->count--;
3672                 }
3673         } while (retry);
3674
3675 out:
3676         return rc;
3677 }
3678
3679 static int dasd_eckd_release_space(struct dasd_device *device,
3680                                    struct format_data_t *rdata)
3681 {
3682         if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3683                 return dasd_eckd_release_space_full(device);
3684         else if (rdata->intensity == 0)
3685                 return dasd_eckd_release_space_trks(device, rdata->start_unit,
3686                                                     rdata->stop_unit);
3687         else
3688                 return -EINVAL;
3689 }
3690
3691 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3692                                                struct dasd_device *startdev,
3693                                                struct dasd_block *block,
3694                                                struct request *req,
3695                                                sector_t first_rec,
3696                                                sector_t last_rec,
3697                                                sector_t first_trk,
3698                                                sector_t last_trk,
3699                                                unsigned int first_offs,
3700                                                unsigned int last_offs,
3701                                                unsigned int blk_per_trk,
3702                                                unsigned int blksize)
3703 {
3704         struct dasd_eckd_private *private;
3705         unsigned long *idaws;
3706         struct LO_eckd_data *LO_data;
3707         struct dasd_ccw_req *cqr;
3708         struct ccw1 *ccw;
3709         struct req_iterator iter;
3710         struct bio_vec bv;
3711         char *dst;
3712         unsigned int off;
3713         int count, cidaw, cplength, datasize;
3714         sector_t recid;
3715         unsigned char cmd, rcmd;
3716         int use_prefix;
3717         struct dasd_device *basedev;
3718
3719         basedev = block->base;
3720         private = basedev->private;
3721         if (rq_data_dir(req) == READ)
3722                 cmd = DASD_ECKD_CCW_READ_MT;
3723         else if (rq_data_dir(req) == WRITE)
3724                 cmd = DASD_ECKD_CCW_WRITE_MT;
3725         else
3726                 return ERR_PTR(-EINVAL);
3727
3728         /* Check struct bio and count the number of blocks for the request. */
3729         count = 0;
3730         cidaw = 0;
3731         rq_for_each_segment(bv, req, iter) {
3732                 if (bv.bv_len & (blksize - 1))
3733                         /* Eckd can only do full blocks. */
3734                         return ERR_PTR(-EINVAL);
3735                 count += bv.bv_len >> (block->s2b_shift + 9);
3736                 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
3737                         cidaw += bv.bv_len >> (block->s2b_shift + 9);
3738         }
3739         /* Paranoia. */
3740         if (count != last_rec - first_rec + 1)
3741                 return ERR_PTR(-EINVAL);
3742
3743         /* use the prefix command if available */
3744         use_prefix = private->features.feature[8] & 0x01;
3745         if (use_prefix) {
3746                 /* 1x prefix + number of blocks */
3747                 cplength = 2 + count;
3748                 /* 1x prefix + cidaws*sizeof(long) */
3749                 datasize = sizeof(struct PFX_eckd_data) +
3750                         sizeof(struct LO_eckd_data) +
3751                         cidaw * sizeof(unsigned long);
3752         } else {
3753                 /* 1x define extent + 1x locate record + number of blocks */
3754                 cplength = 2 + count;
3755                 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
3756                 datasize = sizeof(struct DE_eckd_data) +
3757                         sizeof(struct LO_eckd_data) +
3758                         cidaw * sizeof(unsigned long);
3759         }
3760         /* Find out the number of additional locate record ccws for cdl. */
3761         if (private->uses_cdl && first_rec < 2*blk_per_trk) {
3762                 if (last_rec >= 2*blk_per_trk)
3763                         count = 2*blk_per_trk - first_rec;
3764                 cplength += count;
3765                 datasize += count*sizeof(struct LO_eckd_data);
3766         }
3767         /* Allocate the ccw request. */
3768         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3769                                    startdev, blk_mq_rq_to_pdu(req));
3770         if (IS_ERR(cqr))
3771                 return cqr;
3772         ccw = cqr->cpaddr;
3773         /* First ccw is define extent or prefix. */
3774         if (use_prefix) {
3775                 if (prefix(ccw++, cqr->data, first_trk,
3776                            last_trk, cmd, basedev, startdev) == -EAGAIN) {
3777                         /* Clock not in sync and XRC is enabled.
3778                          * Try again later.
3779                          */
3780                         dasd_sfree_request(cqr, startdev);
3781                         return ERR_PTR(-EAGAIN);
3782                 }
3783                 idaws = (unsigned long *) (cqr->data +
3784                                            sizeof(struct PFX_eckd_data));
3785         } else {
3786                 if (define_extent(ccw++, cqr->data, first_trk,
3787                                   last_trk, cmd, basedev, 0) == -EAGAIN) {
3788                         /* Clock not in sync and XRC is enabled.
3789                          * Try again later.
3790                          */
3791                         dasd_sfree_request(cqr, startdev);
3792                         return ERR_PTR(-EAGAIN);
3793                 }
3794                 idaws = (unsigned long *) (cqr->data +
3795                                            sizeof(struct DE_eckd_data));
3796         }
3797         /* Build locate_record + read/write ccws. */
3798         LO_data = (struct LO_eckd_data *) (idaws + cidaw);
3799         recid = first_rec;
3800         if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
3801                 /* Only standard blocks so there is just one locate record. */
3802                 ccw[-1].flags |= CCW_FLAG_CC;
3803                 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
3804                               last_rec - recid + 1, cmd, basedev, blksize);
3805         }
3806         rq_for_each_segment(bv, req, iter) {
3807                 dst = page_address(bv.bv_page) + bv.bv_offset;
3808                 if (dasd_page_cache) {
3809                         char *copy = kmem_cache_alloc(dasd_page_cache,
3810                                                       GFP_DMA | __GFP_NOWARN);
3811                         if (copy && rq_data_dir(req) == WRITE)
3812                                 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
3813                         if (copy)
3814                                 dst = copy + bv.bv_offset;
3815                 }
3816                 for (off = 0; off < bv.bv_len; off += blksize) {
3817                         sector_t trkid = recid;
3818                         unsigned int recoffs = sector_div(trkid, blk_per_trk);
3819                         rcmd = cmd;
3820                         count = blksize;
3821                         /* Locate record for cdl special block ? */
3822                         if (private->uses_cdl && recid < 2*blk_per_trk) {
3823                                 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
3824                                         rcmd |= 0x8;
3825                                         count = dasd_eckd_cdl_reclen(recid);
3826                                         if (count < blksize &&
3827                                             rq_data_dir(req) == READ)
3828                                                 memset(dst + count, 0xe5,
3829                                                        blksize - count);
3830                                 }
3831                                 ccw[-1].flags |= CCW_FLAG_CC;
3832                                 locate_record(ccw++, LO_data++,
3833                                               trkid, recoffs + 1,
3834                                               1, rcmd, basedev, count);
3835                         }
3836                         /* Locate record for standard blocks ? */
3837                         if (private->uses_cdl && recid == 2*blk_per_trk) {
3838                                 ccw[-1].flags |= CCW_FLAG_CC;
3839                                 locate_record(ccw++, LO_data++,
3840                                               trkid, recoffs + 1,
3841                                               last_rec - recid + 1,
3842                                               cmd, basedev, count);
3843                         }
3844                         /* Read/write ccw. */
3845                         ccw[-1].flags |= CCW_FLAG_CC;
3846                         ccw->cmd_code = rcmd;
3847                         ccw->count = count;
3848                         if (idal_is_needed(dst, blksize)) {
3849                                 ccw->cda = (__u32)(addr_t) idaws;
3850                                 ccw->flags = CCW_FLAG_IDA;
3851                                 idaws = idal_create_words(idaws, dst, blksize);
3852                         } else {
3853                                 ccw->cda = (__u32)(addr_t) dst;
3854                                 ccw->flags = 0;
3855                         }
3856                         ccw++;
3857                         dst += blksize;
3858                         recid++;
3859                 }
3860         }
3861         if (blk_noretry_request(req) ||
3862             block->base->features & DASD_FEATURE_FAILFAST)
3863                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3864         cqr->startdev = startdev;
3865         cqr->memdev = startdev;
3866         cqr->block = block;
3867         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
3868         cqr->lpm = dasd_path_get_ppm(startdev);
3869         cqr->retries = startdev->default_retries;
3870         cqr->buildclk = get_tod_clock();
3871         cqr->status = DASD_CQR_FILLED;
3872
3873         /* Set flags to suppress output for expected errors */
3874         if (dasd_eckd_is_ese(basedev)) {
3875                 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
3876                 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
3877                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
3878         }
3879
3880         return cqr;
3881 }
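
/*
 * Request sizing example for the single-block code path above (illustrative,
 * assuming 4096-byte blocks, i.e. s2b_shift = 3): a 24 KiB read contributes
 * bv.bv_len >> (3 + 9) = 6 blocks, so cplength = 2 + 6 = 8 CCWs (prefix or
 * define extent plus locate record, then one read CCW per block), and
 * datasize grows by one IDAW pointer per block that needs an IDAL.
 */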
3882
3883 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
3884                                                struct dasd_device *startdev,
3885                                                struct dasd_block *block,
3886                                                struct request *req,
3887                                                sector_t first_rec,
3888                                                sector_t last_rec,
3889                                                sector_t first_trk,
3890                                                sector_t last_trk,
3891                                                unsigned int first_offs,
3892                                                unsigned int last_offs,
3893                                                unsigned int blk_per_trk,
3894                                                unsigned int blksize)
3895 {
3896         unsigned long *idaws;
3897         struct dasd_ccw_req *cqr;
3898         struct ccw1 *ccw;
3899         struct req_iterator iter;
3900         struct bio_vec bv;
3901         char *dst, *idaw_dst;
3902         unsigned int cidaw, cplength, datasize;
3903         unsigned int tlf;
3904         sector_t recid;
3905         unsigned char cmd;
3906         struct dasd_device *basedev;
3907         unsigned int trkcount, count, count_to_trk_end;
3908         unsigned int idaw_len, seg_len, part_len, len_to_track_end;
3909         unsigned char new_track, end_idaw;
3910         sector_t trkid;
3911         unsigned int recoffs;
3912
3913         basedev = block->base;
3914         if (rq_data_dir(req) == READ)
3915                 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
3916         else if (rq_data_dir(req) == WRITE)
3917                 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
3918         else
3919                 return ERR_PTR(-EINVAL);
3920
3921         /* Track based I/O needs IDAWs for each page, and not just for
3922          * 64 bit addresses. We need additional IDAWs for pages
3923          * that get filled from two tracks, so we use the number
3924          * of records as an upper limit.
3925          */
3926         cidaw = last_rec - first_rec + 1;
3927         trkcount = last_trk - first_trk + 1;
3928
3929         /* 1x prefix + one read/write ccw per track */
3930         cplength = 1 + trkcount;
3931
3932         datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
3933
3934         /* Allocate the ccw request. */
3935         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3936                                    startdev, blk_mq_rq_to_pdu(req));
3937         if (IS_ERR(cqr))
3938                 return cqr;
3939         ccw = cqr->cpaddr;
3940         /* transfer length factor: how many bytes to read from the last track */
3941         if (first_trk == last_trk)
3942                 tlf = last_offs - first_offs + 1;
3943         else
3944                 tlf = last_offs + 1;
3945         tlf *= blksize;
3946
3947         if (prefix_LRE(ccw++, cqr->data, first_trk,
3948                        last_trk, cmd, basedev, startdev,
3949                        1 /* format */, first_offs + 1,
3950                        trkcount, blksize,
3951                        tlf) == -EAGAIN) {
3952                 /* Clock not in sync and XRC is enabled.
3953                  * Try again later.
3954                  */
3955                 dasd_sfree_request(cqr, startdev);
3956                 return ERR_PTR(-EAGAIN);
3957         }
3958
3959         /*
3960          * The translation of request into ccw programs must meet the
3961          * following conditions:
3962          * - all idaws but the first and the last must address full pages
3963          *   (or 2K blocks on 31-bit)
3964          * - the scope of a ccw and its idal ends at the track boundaries
3965          */
3966         idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
3967         recid = first_rec;
3968         new_track = 1;
3969         end_idaw = 0;
3970         len_to_track_end = 0;
3971         idaw_dst = NULL;
3972         idaw_len = 0;
3973         rq_for_each_segment(bv, req, iter) {
3974                 dst = page_address(bv.bv_page) + bv.bv_offset;
3975                 seg_len = bv.bv_len;
3976                 while (seg_len) {
3977                         if (new_track) {
3978                                 trkid = recid;
3979                                 recoffs = sector_div(trkid, blk_per_trk);
3980                                 count_to_trk_end = blk_per_trk - recoffs;
3981                                 count = min((last_rec - recid + 1),
3982                                             (sector_t)count_to_trk_end);
3983                                 len_to_track_end = count * blksize;
3984                                 ccw[-1].flags |= CCW_FLAG_CC;
3985                                 ccw->cmd_code = cmd;
3986                                 ccw->count = len_to_track_end;
3987                                 ccw->cda = (__u32)(addr_t)idaws;
3988                                 ccw->flags = CCW_FLAG_IDA;
3989                                 ccw++;
3990                                 recid += count;
3991                                 new_track = 0;
3992                                 /* first idaw for a ccw may start anywhere */
3993                                 if (!idaw_dst)
3994                                         idaw_dst = dst;
3995                         }
3996                         /* If we start a new idaw, we must make sure that it
3997                          * starts on an IDA_BLOCK_SIZE boundary.
3998                          * If we continue an idaw, we must make sure that the
3999                          * current segment begins exactly where the idaw
4000                          * accumulated so far ends
4001                          */
4002                         if (!idaw_dst) {
4003                                 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
4004                                         dasd_sfree_request(cqr, startdev);
4005                                         return ERR_PTR(-ERANGE);
4006                                 } else
4007                                         idaw_dst = dst;
4008                         }
4009                         if ((idaw_dst + idaw_len) != dst) {
4010                                 dasd_sfree_request(cqr, startdev);
4011                                 return ERR_PTR(-ERANGE);
4012                         }
4013                         part_len = min(seg_len, len_to_track_end);
4014                         seg_len -= part_len;
4015                         dst += part_len;
4016                         idaw_len += part_len;
4017                         len_to_track_end -= part_len;
4018                         /* collected memory area ends on an IDA_BLOCK border,
4019                          * -> create an idaw
4020                          * idal_create_words will handle cases where idaw_len
4021                          * is larger than IDA_BLOCK_SIZE
4022                          */
4023                         if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
4024                                 end_idaw = 1;
4025                         /* We also need to end the idaw at track end */
4026                         if (!len_to_track_end) {
4027                                 new_track = 1;
4028                                 end_idaw = 1;
4029                         }
4030                         if (end_idaw) {
4031                                 idaws = idal_create_words(idaws, idaw_dst,
4032                                                           idaw_len);
4033                                 idaw_dst = NULL;
4034                                 idaw_len = 0;
4035                                 end_idaw = 0;
4036                         }
4037                 }
4038         }
4039
4040         if (blk_noretry_request(req) ||
4041             block->base->features & DASD_FEATURE_FAILFAST)
4042                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4043         cqr->startdev = startdev;
4044         cqr->memdev = startdev;
4045         cqr->block = block;
4046         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4047         cqr->lpm = dasd_path_get_ppm(startdev);
4048         cqr->retries = startdev->default_retries;
4049         cqr->buildclk = get_tod_clock();
4050         cqr->status = DASD_CQR_FILLED;
4051
4052         /* Set flags to suppress output for expected errors */
4053         if (dasd_eckd_is_ese(basedev))
4054                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4055
4056         return cqr;
4057 }
4058
4059 static int prepare_itcw(struct itcw *itcw,
4060                         unsigned int trk, unsigned int totrk, int cmd,
4061                         struct dasd_device *basedev,
4062                         struct dasd_device *startdev,
4063                         unsigned int rec_on_trk, int count,
4064                         unsigned int blksize,
4065                         unsigned int total_data_size,
4066                         unsigned int tlf,
4067                         unsigned int blk_per_trk)
4068 {
4069         struct PFX_eckd_data pfxdata;
4070         struct dasd_eckd_private *basepriv, *startpriv;
4071         struct DE_eckd_data *dedata;
4072         struct LRE_eckd_data *lredata;
4073         struct dcw *dcw;
4074
4075         u32 begcyl, endcyl;
4076         u16 heads, beghead, endhead;
4077         u8 pfx_cmd;
4078
4079         int rc = 0;
4080         int sector = 0;
4081         int dn, d;
4082
4083
4084         /* setup prefix data */
4085         basepriv = basedev->private;
4086         startpriv = startdev->private;
4087         dedata = &pfxdata.define_extent;
4088         lredata = &pfxdata.locate_record;
4089
4090         memset(&pfxdata, 0, sizeof(pfxdata));
4091         pfxdata.format = 1; /* PFX with LRE */
4092         pfxdata.base_address = basepriv->ned->unit_addr;
4093         pfxdata.base_lss = basepriv->ned->ID;
4094         pfxdata.validity.define_extent = 1;
4095
4096         /* private uid is kept up to date, conf_data may be outdated */
4097         if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
4098                 pfxdata.validity.verify_base = 1;
4099
4100         if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
4101                 pfxdata.validity.verify_base = 1;
4102                 pfxdata.validity.hyper_pav = 1;
4103         }
4104
4105         switch (cmd) {
4106         case DASD_ECKD_CCW_READ_TRACK_DATA:
4107                 dedata->mask.perm = 0x1;
4108                 dedata->attributes.operation = basepriv->attrib.operation;
4109                 dedata->blk_size = blksize;
4110                 dedata->ga_extended |= 0x42;
4111                 lredata->operation.orientation = 0x0;
4112                 lredata->operation.operation = 0x0C;
4113                 lredata->auxiliary.check_bytes = 0x01;
4114                 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4115                 break;
4116         case DASD_ECKD_CCW_WRITE_TRACK_DATA:
4117                 dedata->mask.perm = 0x02;
4118                 dedata->attributes.operation = basepriv->attrib.operation;
4119                 dedata->blk_size = blksize;
4120                 rc = set_timestamp(NULL, dedata, basedev);
4121                 dedata->ga_extended |= 0x42;
4122                 lredata->operation.orientation = 0x0;
4123                 lredata->operation.operation = 0x3F;
4124                 lredata->extended_operation = 0x23;
4125                 lredata->auxiliary.check_bytes = 0x2;
4126                 /*
4127                  * If XRC is supported the System Time Stamp is set. The
4128                  * validity of the time stamp must be reflected in the prefix
4129                  * data as well.
4130                  */
4131                 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
4132                         pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
4133                 pfx_cmd = DASD_ECKD_CCW_PFX;
4134                 break;
4135         case DASD_ECKD_CCW_READ_COUNT_MT:
4136                 dedata->mask.perm = 0x1;
4137                 dedata->attributes.operation = DASD_BYPASS_CACHE;
4138                 dedata->ga_extended |= 0x42;
4139                 dedata->blk_size = blksize;
4140                 lredata->operation.orientation = 0x2;
4141                 lredata->operation.operation = 0x16;
4142                 lredata->auxiliary.check_bytes = 0x01;
4143                 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4144                 break;
4145         default:
4146                 DBF_DEV_EVENT(DBF_ERR, basedev,
4147                               "prepare itcw, unknown opcode 0x%x", cmd);
4148                 BUG();
4149                 break;
4150         }
4151         if (rc)
4152                 return rc;
4153
4154         dedata->attributes.mode = 0x3;  /* ECKD */
4155
4156         heads = basepriv->rdc_data.trk_per_cyl;
4157         begcyl = trk / heads;
4158         beghead = trk % heads;
4159         endcyl = totrk / heads;
4160         endhead = totrk % heads;
4161
4162         /* check for sequential prestage - enhance cylinder range */
4163         if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
4164             dedata->attributes.operation == DASD_SEQ_ACCESS) {
4165
4166                 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
4167                         endcyl += basepriv->attrib.nr_cyl;
4168                 else
4169                         endcyl = (basepriv->real_cyl - 1);
4170         }
4171
4172         set_ch_t(&dedata->beg_ext, begcyl, beghead);
4173         set_ch_t(&dedata->end_ext, endcyl, endhead);
4174
4175         dedata->ep_format = 0x20; /* records per track is valid */
4176         dedata->ep_rec_per_track = blk_per_trk;
4177
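             /*
              * Derive the approximate rotational position (sector) of the
              * record from the device type specific track layout, so that
              * the Locate Record data can orient close to the record.
              */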
4178         if (rec_on_trk) {
4179                 switch (basepriv->rdc_data.dev_type) {
4180                 case 0x3390:
4181                         dn = ceil_quot(blksize + 6, 232);
4182                         d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
4183                         sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
4184                         break;
4185                 case 0x3380:
4186                         d = 7 + ceil_quot(blksize + 12, 32);
4187                         sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
4188                         break;
4189                 }
4190         }
4191
4192         if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
4193                 lredata->auxiliary.length_valid = 0;
4194                 lredata->auxiliary.length_scope = 0;
4195                 lredata->sector = 0xff;
4196         } else {
4197                 lredata->auxiliary.length_valid = 1;
4198                 lredata->auxiliary.length_scope = 1;
4199                 lredata->sector = sector;
4200         }
4201         lredata->auxiliary.imbedded_ccw_valid = 1;
4202         lredata->length = tlf;
4203         lredata->imbedded_ccw = cmd;
4204         lredata->count = count;
4205         set_ch_t(&lredata->seek_addr, begcyl, beghead);
4206         lredata->search_arg.cyl = lredata->seek_addr.cyl;
4207         lredata->search_arg.head = lredata->seek_addr.head;
4208         lredata->search_arg.record = rec_on_trk;
4209
4210         dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
4211                      &pfxdata, sizeof(pfxdata), total_data_size);
4212         return PTR_ERR_OR_ZERO(dcw);
4213 }
4214
4215 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
4216                                                struct dasd_device *startdev,
4217                                                struct dasd_block *block,
4218                                                struct request *req,
4219                                                sector_t first_rec,
4220                                                sector_t last_rec,
4221                                                sector_t first_trk,
4222                                                sector_t last_trk,
4223                                                unsigned int first_offs,
4224                                                unsigned int last_offs,
4225                                                unsigned int blk_per_trk,
4226                                                unsigned int blksize)
4227 {
4228         struct dasd_ccw_req *cqr;
4229         struct req_iterator iter;
4230         struct bio_vec bv;
4231         char *dst;
4232         unsigned int trkcount, ctidaw;
4233         unsigned char cmd;
4234         struct dasd_device *basedev;
4235         unsigned int tlf;
4236         struct itcw *itcw;
4237         struct tidaw *last_tidaw = NULL;
4238         int itcw_op;
4239         size_t itcw_size;
4240         u8 tidaw_flags;
4241         unsigned int seg_len, part_len, len_to_track_end;
4242         unsigned char new_track;
4243         sector_t recid, trkid;
4244         unsigned int offs;
4245         unsigned int count, count_to_trk_end;
4246         int ret;
4247
4248         basedev = block->base;
4249         if (rq_data_dir(req) == READ) {
4250                 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4251                 itcw_op = ITCW_OP_READ;
4252         } else if (rq_data_dir(req) == WRITE) {
4253                 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4254                 itcw_op = ITCW_OP_WRITE;
4255         } else
4256                 return ERR_PTR(-EINVAL);
4257
4258         /* track based I/O needs to address all memory via TIDAWs,
4259          * not just for 64 bit addresses. This allows us to map
4260          * each segment directly to one tidaw.
4261          * In the case of write requests, additional tidaws may
4262          * be needed when a segment crosses a track boundary.
4263          */
4264         trkcount = last_trk - first_trk + 1;
4265         ctidaw = 0;
4266         rq_for_each_segment(bv, req, iter) {
4267                 ++ctidaw;
4268         }
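             /*
              * A write segment that spans a track boundary is split into two
              * tidaws, because the CBC flag must end a tidaw at the boundary;
              * reserve one extra tidaw per possible boundary crossing.
              */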
4269         if (rq_data_dir(req) == WRITE)
4270                 ctidaw += (last_trk - first_trk);
4271
4272         /* Allocate the ccw request. */
4273         itcw_size = itcw_calc_size(0, ctidaw, 0);
4274         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4275                                    blk_mq_rq_to_pdu(req));
4276         if (IS_ERR(cqr))
4277                 return cqr;
4278
4279         /* transfer length factor: how many bytes to read from the last track */
4280         if (first_trk == last_trk)
4281                 tlf = last_offs - first_offs + 1;
4282         else
4283                 tlf = last_offs + 1;
4284         tlf *= blksize;
4285
4286         itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4287         if (IS_ERR(itcw)) {
4288                 ret = -EINVAL;
4289                 goto out_error;
4290         }
4291         cqr->cpaddr = itcw_get_tcw(itcw);
4292         if (prepare_itcw(itcw, first_trk, last_trk,
4293                          cmd, basedev, startdev,
4294                          first_offs + 1,
4295                          trkcount, blksize,
4296                          (last_rec - first_rec + 1) * blksize,
4297                          tlf, blk_per_trk) == -EAGAIN) {
4298                 /* Clock not in sync and XRC is enabled.
4299                  * Try again later.
4300                  */
4301                 ret = -EAGAIN;
4302                 goto out_error;
4303         }
4304         len_to_track_end = 0;
4305         /*
4306          * A tidaw can address 4k of memory, but must not cross page boundaries
4307          * We can let the block layer handle this by setting
4308          * blk_queue_segment_boundary to page boundaries and
4309          * blk_max_segment_size to page size when setting up the request queue.
4310          * For write requests, a TIDAW must not cross track boundaries, because
4311          * we have to set the CBC flag on the last tidaw for each track.
4312          */
4313         if (rq_data_dir(req) == WRITE) {
4314                 new_track = 1;
4315                 recid = first_rec;
4316                 rq_for_each_segment(bv, req, iter) {
4317                         dst = page_address(bv.bv_page) + bv.bv_offset;
4318                         seg_len = bv.bv_len;
4319                         while (seg_len) {
4320                                 if (new_track) {
4321                                         trkid = recid;
4322                                         offs = sector_div(trkid, blk_per_trk);
4323                                         count_to_trk_end = blk_per_trk - offs;
4324                                         count = min((last_rec - recid + 1),
4325                                                     (sector_t)count_to_trk_end);
4326                                         len_to_track_end = count * blksize;
4327                                         recid += count;
4328                                         new_track = 0;
4329                                 }
4330                                 part_len = min(seg_len, len_to_track_end);
4331                                 seg_len -= part_len;
4332                                 len_to_track_end -= part_len;
4333                                 /* We need to end the tidaw at track end */
4334                                 if (!len_to_track_end) {
4335                                         new_track = 1;
4336                                         tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4337                                 } else
4338                                         tidaw_flags = 0;
4339                                 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4340                                                             dst, part_len);
4341                                 if (IS_ERR(last_tidaw)) {
4342                                         ret = -EINVAL;
4343                                         goto out_error;
4344                                 }
4345                                 dst += part_len;
4346                         }
4347                 }
4348         } else {
4349                 rq_for_each_segment(bv, req, iter) {
4350                         dst = page_address(bv.bv_page) + bv.bv_offset;
4351                         last_tidaw = itcw_add_tidaw(itcw, 0x00,
4352                                                     dst, bv.bv_len);
4353                         if (IS_ERR(last_tidaw)) {
4354                                 ret = -EINVAL;
4355                                 goto out_error;
4356                         }
4357                 }
4358         }
4359         last_tidaw->flags |= TIDAW_FLAGS_LAST;
4360         last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
4361         itcw_finalize(itcw);
4362
4363         if (blk_noretry_request(req) ||
4364             block->base->features & DASD_FEATURE_FAILFAST)
4365                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4366         cqr->cpmode = 1;
4367         cqr->startdev = startdev;
4368         cqr->memdev = startdev;
4369         cqr->block = block;
4370         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4371         cqr->lpm = dasd_path_get_ppm(startdev);
4372         cqr->retries = startdev->default_retries;
4373         cqr->buildclk = get_tod_clock();
4374         cqr->status = DASD_CQR_FILLED;
4375
4376         /* Set flags to suppress output for expected errors */
4377         if (dasd_eckd_is_ese(basedev)) {
4378                 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4379                 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4380                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4381         }
4382
4383         return cqr;
4384 out_error:
4385         dasd_sfree_request(cqr, startdev);
4386         return ERR_PTR(ret);
4387 }
4388
4389 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4390                                                struct dasd_block *block,
4391                                                struct request *req)
4392 {
4393         int cmdrtd, cmdwtd;
4394         int use_prefix;
4395         int fcx_multitrack;
4396         struct dasd_eckd_private *private;
4397         struct dasd_device *basedev;
4398         sector_t first_rec, last_rec;
4399         sector_t first_trk, last_trk;
4400         unsigned int first_offs, last_offs;
4401         unsigned int blk_per_trk, blksize;
4402         int cdlspecial;
4403         unsigned int data_size;
4404         struct dasd_ccw_req *cqr;
4405
4406         basedev = block->base;
4407         private = basedev->private;
4408
4409         /* Calculate number of blocks/records per track. */
4410         blksize = block->bp_block;
4411         blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4412         if (blk_per_trk == 0)
4413                 return ERR_PTR(-EINVAL);
4414         /* Calculate record id of first and last block. */
4415         first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4416         first_offs = sector_div(first_trk, blk_per_trk);
4417         last_rec = last_trk =
4418                 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4419         last_offs = sector_div(last_trk, blk_per_trk);
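             /*
              * Requests touching the first two tracks of a CDL formatted
              * volume must use the single block channel program, since those
              * tracks contain records with sizes that differ from the rest
              * of the volume.
              */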
4420         cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4421
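             /* feature bit: multi-track requests are supported in transport (fcx) mode */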
4422         fcx_multitrack = private->features.feature[40] & 0x20;
4423         data_size = blk_rq_bytes(req);
4424         if (data_size % blksize)
4425                 return ERR_PTR(-EINVAL);
4426         /* tpm write requests add CBC data on each track boundary */
4427         if (rq_data_dir(req) == WRITE)
4428                 data_size += (last_trk - first_trk) * 4;
4429
4430         /* are read track data and write track data in command mode supported? */
4431         cmdrtd = private->features.feature[9] & 0x20;
4432         cmdwtd = private->features.feature[12] & 0x40;
4433         use_prefix = private->features.feature[8] & 0x01;
4434
4435         cqr = NULL;
4436         if (cdlspecial || dasd_page_cache) {
4437                 /* do nothing, just fall through to the cmd mode single case */
4438         } else if ((data_size <= private->fcx_max_data)
4439                    && (fcx_multitrack || (first_trk == last_trk))) {
4440                 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4441                                                     first_rec, last_rec,
4442                                                     first_trk, last_trk,
4443                                                     first_offs, last_offs,
4444                                                     blk_per_trk, blksize);
4445                 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4446                     (PTR_ERR(cqr) != -ENOMEM))
4447                         cqr = NULL;
4448         } else if (use_prefix &&
4449                    (((rq_data_dir(req) == READ) && cmdrtd) ||
4450                     ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4451                 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4452                                                    first_rec, last_rec,
4453                                                    first_trk, last_trk,
4454                                                    first_offs, last_offs,
4455                                                    blk_per_trk, blksize);
4456                 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4457                     (PTR_ERR(cqr) != -ENOMEM))
4458                         cqr = NULL;
4459         }
4460         if (!cqr)
4461                 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4462                                                     first_rec, last_rec,
4463                                                     first_trk, last_trk,
4464                                                     first_offs, last_offs,
4465                                                     blk_per_trk, blksize);
4466         return cqr;
4467 }
4468
4469 static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
4470                                                    struct dasd_block *block,
4471                                                    struct request *req)
4472 {
4473         sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
4474         unsigned int seg_len, len_to_track_end;
4475         unsigned int cidaw, cplength, datasize;
4476         sector_t first_trk, last_trk, sectors;
4477         struct dasd_eckd_private *base_priv;
4478         struct dasd_device *basedev;
4479         struct req_iterator iter;
4480         struct dasd_ccw_req *cqr;
4481         unsigned int first_offs;
4482         unsigned int trkcount;
4483         unsigned long *idaws;
4484         unsigned int size;
4485         unsigned char cmd;
4486         struct bio_vec bv;
4487         struct ccw1 *ccw;
4488         int use_prefix;
4489         void *data;
4490         char *dst;
4491
4492         /*
4493          * raw track access needs to be a multiple of 64k and on a 64k boundary.
4494          * For read requests we can fix an incorrect alignment by padding
4495          * the request with dummy pages.
4496          */
4497         start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
4498         end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
4499                 DASD_RAW_SECTORS_PER_TRACK;
4500         end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
4501                 DASD_RAW_SECTORS_PER_TRACK;
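             /*
              * Example: a 100 sector read starting at sector 130 yields
              * start_padding_sectors = 2 and end_padding_sectors = 26, so
              * the padded request covers exactly one full 128 sector track.
              */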
4502         basedev = block->base;
4503         if ((start_padding_sectors || end_padding_sectors) &&
4504             (rq_data_dir(req) == WRITE)) {
4505                 DBF_DEV_EVENT(DBF_ERR, basedev,
4506                               "raw write not track aligned (%llu,%llu) req %p",
4507                               start_padding_sectors, end_padding_sectors, req);
4508                 return ERR_PTR(-EINVAL);
4509         }
4510
4511         first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
4512         last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
4513                 DASD_RAW_SECTORS_PER_TRACK;
4514         trkcount = last_trk - first_trk + 1;
4515         first_offs = 0;
4516
4517         if (rq_data_dir(req) == READ)
4518                 cmd = DASD_ECKD_CCW_READ_TRACK;
4519         else if (rq_data_dir(req) == WRITE)
4520                 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
4521         else
4522                 return ERR_PTR(-EINVAL);
4523
4524         /*
4525          * Raw track based I/O needs IDAWs for each page,
4526          * and not just for 64 bit addresses.
4527          */
4528         cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
4529
4530         /*
4531          * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
4532          * of extended parameter. This is needed for write full track.
4533          */
4534         base_priv = basedev->private;
4535         use_prefix = base_priv->features.feature[8] & 0x01;
4536         if (use_prefix) {
4537                 cplength = 1 + trkcount;
4538                 size = sizeof(struct PFX_eckd_data) + 2;
4539         } else {
4540                 cplength = 2 + trkcount;
4541                 size = sizeof(struct DE_eckd_data) +
4542                         sizeof(struct LRE_eckd_data) + 2;
4543         }
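             /* round up so the idaw list placed behind the data is doubleword aligned */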
4544         size = ALIGN(size, 8);
4545
4546         datasize = size + cidaw * sizeof(unsigned long);
4547
4548         /* Allocate the ccw request. */
4549         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
4550                                    datasize, startdev, blk_mq_rq_to_pdu(req));
4551         if (IS_ERR(cqr))
4552                 return cqr;
4553
4554         ccw = cqr->cpaddr;
4555         data = cqr->data;
4556
4557         if (use_prefix) {
4558                 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
4559                            startdev, 1, first_offs + 1, trkcount, 0, 0);
4560         } else {
4561                 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
4562                 ccw[-1].flags |= CCW_FLAG_CC;
4563
4564                 data += sizeof(struct DE_eckd_data);
4565                 locate_record_ext(ccw++, data, first_trk, first_offs + 1,
4566                                   trkcount, cmd, basedev, 0, 0);
4567         }
4568
4569         idaws = (unsigned long *)(cqr->data + size);
4570         len_to_track_end = 0;
4571         if (start_padding_sectors) {
4572                 ccw[-1].flags |= CCW_FLAG_CC;
4573                 ccw->cmd_code = cmd;
4574                 /* maximum 3390 track size */
4575                 ccw->count = 57326;
4576                 /* 64k map to one track */
4577                 len_to_track_end = 65536 - start_padding_sectors * 512;
4578                 ccw->cda = (__u32)(addr_t)idaws;
4579                 ccw->flags |= CCW_FLAG_IDA;
4580                 ccw->flags |= CCW_FLAG_SLI;
4581                 ccw++;
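                     /*
                      * Pad the unaligned start with idaws that point to the
                      * shared rawpadpage; each 8 padding sectors of 512 bytes
                      * take one page sized idal entry.
                      */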
4582                 for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
4583                         idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4584         }
4585         rq_for_each_segment(bv, req, iter) {
4586                 dst = page_address(bv.bv_page) + bv.bv_offset;
4587                 seg_len = bv.bv_len;
4588                 if (cmd == DASD_ECKD_CCW_READ_TRACK)
4589                         memset(dst, 0, seg_len);
4590                 if (!len_to_track_end) {
4591                         ccw[-1].flags |= CCW_FLAG_CC;
4592                         ccw->cmd_code = cmd;
4593                         /* maximum 3390 track size */
4594                         ccw->count = 57326;
4595                         /* 64k map to one track */
4596                         len_to_track_end = 65536;
4597                         ccw->cda = (__u32)(addr_t)idaws;
4598                         ccw->flags |= CCW_FLAG_IDA;
4599                         ccw->flags |= CCW_FLAG_SLI;
4600                         ccw++;
4601                 }
4602                 len_to_track_end -= seg_len;
4603                 idaws = idal_create_words(idaws, dst, seg_len);
4604         }
4605         for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
4606                 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4607         if (blk_noretry_request(req) ||
4608             block->base->features & DASD_FEATURE_FAILFAST)
4609                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4610         cqr->startdev = startdev;
4611         cqr->memdev = startdev;
4612         cqr->block = block;
4613         cqr->expires = startdev->default_expires * HZ;
4614         cqr->lpm = dasd_path_get_ppm(startdev);
4615         cqr->retries = startdev->default_retries;
4616         cqr->buildclk = get_tod_clock();
4617         cqr->status = DASD_CQR_FILLED;
4618
4619         return cqr;
4620 }
4621
4622
4623 static int
4624 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4625 {
4626         struct dasd_eckd_private *private;
4627         struct ccw1 *ccw;
4628         struct req_iterator iter;
4629         struct bio_vec bv;
4630         char *dst, *cda;
4631         unsigned int blksize, blk_per_trk, off;
4632         sector_t recid;
4633         int status;
4634
4635         if (!dasd_page_cache)
4636                 goto out;
4637         private = cqr->block->base->private;
4638         blksize = cqr->block->bp_block;
4639         blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4640         recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4641         ccw = cqr->cpaddr;
4642         /* Skip over define extent & locate record. */
4643         ccw++;
4644         if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
4645                 ccw++;
4646         rq_for_each_segment(bv, req, iter) {
4647                 dst = page_address(bv.bv_page) + bv.bv_offset;
4648                 for (off = 0; off < bv.bv_len; off += blksize) {
4649                         /* Skip locate record. */
4650                         if (private->uses_cdl && recid <= 2*blk_per_trk)
4651                                 ccw++;
4652                         if (dst) {
4653                                 if (ccw->flags & CCW_FLAG_IDA)
4654                                         cda = *((char **)((addr_t) ccw->cda));
4655                                 else
4656                                         cda = (char *)((addr_t) ccw->cda);
4657                                 if (dst != cda) {
4658                                         if (rq_data_dir(req) == READ)
4659                                                 memcpy(dst, cda, bv.bv_len);
4660                                         kmem_cache_free(dasd_page_cache,
4661                                             (void *)((addr_t)cda & PAGE_MASK));
4662                                 }
4663                                 dst = NULL;
4664                         }
4665                         ccw++;
4666                         recid++;
4667                 }
4668         }
4669 out:
4670         status = cqr->status == DASD_CQR_DONE;
4671         dasd_sfree_request(cqr, cqr->memdev);
4672         return status;
4673 }
4674
4675 /*
4676  * Modify ccw/tcw in cqr so it can be started on a base device.
4677  *
4678  * Note that this is not enough to restart the cqr!
4679  * Either reset cqr->startdev as well (summary unit check handling)
4680  * or restart via separate cqr (as in ERP handling).
4681  */
4682 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4683 {
4684         struct ccw1 *ccw;
4685         struct PFX_eckd_data *pfxdata;
4686         struct tcw *tcw;
4687         struct tccb *tccb;
4688         struct dcw *dcw;
4689
4690         if (cqr->cpmode == 1) {
4691                 tcw = cqr->cpaddr;
4692                 tccb = tcw_get_tccb(tcw);
4693                 dcw = (struct dcw *)&tccb->tca[0];
4694                 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4695                 pfxdata->validity.verify_base = 0;
4696                 pfxdata->validity.hyper_pav = 0;
4697         } else {
4698                 ccw = cqr->cpaddr;
4699                 pfxdata = cqr->data;
4700                 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4701                         pfxdata->validity.verify_base = 0;
4702                         pfxdata->validity.hyper_pav = 0;
4703                 }
4704         }
4705 }
4706
4707 #define DASD_ECKD_CHANQ_MAX_SIZE 4
4708
4709 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4710                                                      struct dasd_block *block,
4711                                                      struct request *req)
4712 {
4713         struct dasd_eckd_private *private;
4714         struct dasd_device *startdev;
4715         unsigned long flags;
4716         struct dasd_ccw_req *cqr;
4717
4718         startdev = dasd_alias_get_start_dev(base);
4719         if (!startdev)
4720                 startdev = base;
4721         private = startdev->private;
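             /* do not queue more than DASD_ECKD_CHANQ_MAX_SIZE requests per start device */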
4722         if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4723                 return ERR_PTR(-EBUSY);
4724
4725         spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4726         private->count++;
4727         if ((base->features & DASD_FEATURE_USERAW))
4728                 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4729         else
4730                 cqr = dasd_eckd_build_cp(startdev, block, req);
4731         if (IS_ERR(cqr))
4732                 private->count--;
4733         spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4734         return cqr;
4735 }
4736
4737 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4738                                    struct request *req)
4739 {
4740         struct dasd_eckd_private *private;
4741         unsigned long flags;
4742
4743         spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4744         private = cqr->memdev->private;
4745         private->count--;
4746         spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4747         return dasd_eckd_free_cp(cqr, req);
4748 }
4749
4750 static int
4751 dasd_eckd_fill_info(struct dasd_device * device,
4752                     struct dasd_information2_t * info)
4753 {
4754         struct dasd_eckd_private *private = device->private;
4755
4756         info->label_block = 2;
4757         info->FBA_layout = private->uses_cdl ? 0 : 1;
4758         info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4759         info->characteristics_size = sizeof(private->rdc_data);
4760         memcpy(info->characteristics, &private->rdc_data,
4761                sizeof(private->rdc_data));
4762         info->confdata_size = min((unsigned long)private->conf_len,
4763                                   sizeof(info->configuration_data));
4764         memcpy(info->configuration_data, private->conf_data,
4765                info->confdata_size);
4766         return 0;
4767 }
4768
4769 /*
4770  * SECTION: ioctl functions for eckd devices.
4771  */
4772
4773 /*
4774  * Release device ioctl.
4775  * Builds a channel program to release a previously reserved
4776  * (see dasd_eckd_reserve) device.
4777  */
4778 static int
4779 dasd_eckd_release(struct dasd_device *device)
4780 {
4781         struct dasd_ccw_req *cqr;
4782         int rc;
4783         struct ccw1 *ccw;
4784         int useglobal;
4785
4786         if (!capable(CAP_SYS_ADMIN))
4787                 return -EACCES;
4788
4789         useglobal = 0;
4790         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4791         if (IS_ERR(cqr)) {
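                     /*
                      * Allocation may fail, e.g. while I/O is blocked by an
                      * outstanding reserve; fall back to the preallocated
                      * emergency request, serialized by dasd_reserve_mutex.
                      */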
4792                 mutex_lock(&dasd_reserve_mutex);
4793                 useglobal = 1;
4794                 cqr = &dasd_reserve_req->cqr;
4795                 memset(cqr, 0, sizeof(*cqr));
4796                 memset(&dasd_reserve_req->ccw, 0,
4797                        sizeof(dasd_reserve_req->ccw));
4798                 cqr->cpaddr = &dasd_reserve_req->ccw;
4799                 cqr->data = &dasd_reserve_req->data;
4800                 cqr->magic = DASD_ECKD_MAGIC;
4801         }
4802         ccw = cqr->cpaddr;
4803         ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
4804         ccw->flags |= CCW_FLAG_SLI;
4805         ccw->count = 32;
4806         ccw->cda = (__u32)(addr_t) cqr->data;
4807         cqr->startdev = device;
4808         cqr->memdev = device;
4809         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4810         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4811         cqr->retries = 2;       /* set retry counter to enable basic ERP */
4812         cqr->expires = 2 * HZ;
4813         cqr->buildclk = get_tod_clock();
4814         cqr->status = DASD_CQR_FILLED;
4815
4816         rc = dasd_sleep_on_immediatly(cqr);
4817         if (!rc)
4818                 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4819
4820         if (useglobal)
4821                 mutex_unlock(&dasd_reserve_mutex);
4822         else
4823                 dasd_sfree_request(cqr, cqr->memdev);
4824         return rc;
4825 }
4826
4827 /*
4828  * Reserve device ioctl.
4829  * Options are set to 'synchronous wait for interrupt' and
4830  * 'timeout the request'. This leads to a terminate IO if
4831  * the interrupt is outstanding for a certain time.
4832  */
4833 static int
4834 dasd_eckd_reserve(struct dasd_device *device)
4835 {
4836         struct dasd_ccw_req *cqr;
4837         int rc;
4838         struct ccw1 *ccw;
4839         int useglobal;
4840
4841         if (!capable(CAP_SYS_ADMIN))
4842                 return -EACCES;
4843
4844         useglobal = 0;
4845         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4846         if (IS_ERR(cqr)) {
4847                 mutex_lock(&dasd_reserve_mutex);
4848                 useglobal = 1;
4849                 cqr = &dasd_reserve_req->cqr;
4850                 memset(cqr, 0, sizeof(*cqr));
4851                 memset(&dasd_reserve_req->ccw, 0,
4852                        sizeof(dasd_reserve_req->ccw));
4853                 cqr->cpaddr = &dasd_reserve_req->ccw;
4854                 cqr->data = &dasd_reserve_req->data;
4855                 cqr->magic = DASD_ECKD_MAGIC;
4856         }
4857         ccw = cqr->cpaddr;
4858         ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
4859         ccw->flags |= CCW_FLAG_SLI;
4860         ccw->count = 32;
4861         ccw->cda = (__u32)(addr_t) cqr->data;
4862         cqr->startdev = device;
4863         cqr->memdev = device;
4864         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4865         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4866         cqr->retries = 2;       /* set retry counter to enable basic ERP */
4867         cqr->expires = 2 * HZ;
4868         cqr->buildclk = get_tod_clock();
4869         cqr->status = DASD_CQR_FILLED;
4870
4871         rc = dasd_sleep_on_immediatly(cqr);
4872         if (!rc)
4873                 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4874
4875         if (useglobal)
4876                 mutex_unlock(&dasd_reserve_mutex);
4877         else
4878                 dasd_sfree_request(cqr, cqr->memdev);
4879         return rc;
4880 }
4881
4882 /*
4883  * Steal lock ioctl - unconditional reserve device.
4884  * Builds a channel program to break a device's reservation.
4885  * (unconditional reserve)
4886  */
4887 static int
4888 dasd_eckd_steal_lock(struct dasd_device *device)
4889 {
4890         struct dasd_ccw_req *cqr;
4891         int rc;
4892         struct ccw1 *ccw;
4893         int useglobal;
4894
4895         if (!capable(CAP_SYS_ADMIN))
4896                 return -EACCES;
4897
4898         useglobal = 0;
4899         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4900         if (IS_ERR(cqr)) {
4901                 mutex_lock(&dasd_reserve_mutex);
4902                 useglobal = 1;
4903                 cqr = &dasd_reserve_req->cqr;
4904                 memset(cqr, 0, sizeof(*cqr));
4905                 memset(&dasd_reserve_req->ccw, 0,
4906                        sizeof(dasd_reserve_req->ccw));
4907                 cqr->cpaddr = &dasd_reserve_req->ccw;
4908                 cqr->data = &dasd_reserve_req->data;
4909                 cqr->magic = DASD_ECKD_MAGIC;
4910         }
4911         ccw = cqr->cpaddr;
4912         ccw->cmd_code = DASD_ECKD_CCW_SLCK;
4913         ccw->flags |= CCW_FLAG_SLI;
4914         ccw->count = 32;
4915         ccw->cda = (__u32)(addr_t) cqr->data;
4916         cqr->startdev = device;
4917         cqr->memdev = device;
4918         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4919         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4920         cqr->retries = 2;       /* set retry counter to enable basic ERP */
4921         cqr->expires = 2 * HZ;
4922         cqr->buildclk = get_tod_clock();
4923         cqr->status = DASD_CQR_FILLED;
4924
4925         rc = dasd_sleep_on_immediatly(cqr);
4926         if (!rc)
4927                 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4928
4929         if (useglobal)
4930                 mutex_unlock(&dasd_reserve_mutex);
4931         else
4932                 dasd_sfree_request(cqr, cqr->memdev);
4933         return rc;
4934 }
4935
4936 /*
4937  * SNID - Sense Path Group ID
4938  * This ioctl may be used in situations where I/O is stalled due to
4939  * a reserve, so if the normal dasd_smalloc_request fails, we use the
4940  * preallocated dasd_reserve_req.
4941  */
4942 static int dasd_eckd_snid(struct dasd_device *device,
4943                           void __user *argp)
4944 {
4945         struct dasd_ccw_req *cqr;
4946         int rc;
4947         struct ccw1 *ccw;
4948         int useglobal;
4949         struct dasd_snid_ioctl_data usrparm;
4950
4951         if (!capable(CAP_SYS_ADMIN))
4952                 return -EACCES;
4953
4954         if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
4955                 return -EFAULT;
4956
4957         useglobal = 0;
4958         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
4959                                    sizeof(struct dasd_snid_data), device,
4960                                    NULL);
4961         if (IS_ERR(cqr)) {
4962                 mutex_lock(&dasd_reserve_mutex);
4963                 useglobal = 1;
4964                 cqr = &dasd_reserve_req->cqr;
4965                 memset(cqr, 0, sizeof(*cqr));
4966                 memset(&dasd_reserve_req->ccw, 0,
4967                        sizeof(dasd_reserve_req->ccw));
4968                 cqr->cpaddr = &dasd_reserve_req->ccw;
4969                 cqr->data = &dasd_reserve_req->data;
4970                 cqr->magic = DASD_ECKD_MAGIC;
4971         }
4972         ccw = cqr->cpaddr;
4973         ccw->cmd_code = DASD_ECKD_CCW_SNID;
4974         ccw->flags |= CCW_FLAG_SLI;
4975         ccw->count = 12;
4976         ccw->cda = (__u32)(addr_t) cqr->data;
4977         cqr->startdev = device;
4978         cqr->memdev = device;
4979         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4980         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4981         set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
4982         cqr->retries = 5;
4983         cqr->expires = 10 * HZ;
4984         cqr->buildclk = get_tod_clock();
4985         cqr->status = DASD_CQR_FILLED;
4986         cqr->lpm = usrparm.path_mask;
4987
4988         rc = dasd_sleep_on_immediatly(cqr);
4989         /* verify that I/O processing didn't modify the path mask */
4990         if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
4991                 rc = -EIO;
4992         if (!rc) {
4993                 usrparm.data = *((struct dasd_snid_data *)cqr->data);
4994                 if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
4995                         rc = -EFAULT;
4996         }
4997
4998         if (useglobal)
4999                 mutex_unlock(&dasd_reserve_mutex);
5000         else
5001                 dasd_sfree_request(cqr, cqr->memdev);
5002         return rc;
5003 }
5004
5005 /*
5006  * Read performance statistics
5007  */
5008 static int
5009 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
5010 {
5011         struct dasd_psf_prssd_data *prssdp;
5012         struct dasd_rssd_perf_stats_t *stats;
5013         struct dasd_ccw_req *cqr;
5014         struct ccw1 *ccw;
5015         int rc;
5016
5017         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
5018                                    (sizeof(struct dasd_psf_prssd_data) +
5019                                     sizeof(struct dasd_rssd_perf_stats_t)),
5020                                    device, NULL);
5021         if (IS_ERR(cqr)) {
5022                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5023                             "Could not allocate initialization request");
5024                 return PTR_ERR(cqr);
5025         }
5026         cqr->startdev = device;
5027         cqr->memdev = device;
5028         cqr->retries = 0;
5029         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5030         cqr->expires = 10 * HZ;
5031
5032         /* Prepare for Read Subsystem Data */
5033         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5034         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5035         prssdp->order = PSF_ORDER_PRSSD;
5036         prssdp->suborder = 0x01;        /* Performance Statistics */
5037         prssdp->varies[1] = 0x01;       /* Perf Statistics for the Subsystem */
5038
5039         ccw = cqr->cpaddr;
5040         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5041         ccw->count = sizeof(struct dasd_psf_prssd_data);
5042         ccw->flags |= CCW_FLAG_CC;
5043         ccw->cda = (__u32)(addr_t) prssdp;
5044
5045         /* Read Subsystem Data - Performance Statistics */
5046         stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5047         memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
5048
5049         ccw++;
5050         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5051         ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
5052         ccw->cda = (__u32)(addr_t) stats;
5053
5054         cqr->buildclk = get_tod_clock();
5055         cqr->status = DASD_CQR_FILLED;
5056         rc = dasd_sleep_on(cqr);
5057         if (rc == 0) {
5058                 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5059                 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5060                 if (copy_to_user(argp, stats,
5061                                  sizeof(struct dasd_rssd_perf_stats_t)))
5062                         rc = -EFAULT;
5063         }
5064         dasd_sfree_request(cqr, cqr->memdev);
5065         return rc;
5066 }
5067
5068 /*
5069  * Get attributes (cache operations)
5070  * Returns the cache attributes used in Define Extent (DE).
5071  */
5072 static int
5073 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5074 {
5075         struct dasd_eckd_private *private = device->private;
5076         struct attrib_data_t attrib = private->attrib;
5077         int rc;
5078
5079         if (!capable(CAP_SYS_ADMIN))
5080                 return -EACCES;
5081         if (!argp)
5082                 return -EINVAL;
5083
5084         rc = 0;
5085         if (copy_to_user(argp, (long *) &attrib,
5086                          sizeof(struct attrib_data_t)))
5087                 rc = -EFAULT;
5088
5089         return rc;
5090 }
5091
5092 /*
5093  * Set attributes (cache operations)
5094  * Stores the attributes for cache operations to be used in Define Extent (DE).
5095  */
5096 static int
5097 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5098 {
5099         struct dasd_eckd_private *private = device->private;
5100         struct attrib_data_t attrib;
5101
5102         if (!capable(CAP_SYS_ADMIN))
5103                 return -EACCES;
5104         if (!argp)
5105                 return -EINVAL;
5106
5107         if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5108                 return -EFAULT;
5109         private->attrib = attrib;
5110
5111         dev_info(&device->cdev->dev,
5112                  "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5113                  private->attrib.operation, private->attrib.nr_cyl);
5114         return 0;
5115 }
5116
5117 /*
5118  * Issue syscall I/O to EMC Symmetrix array.
5119  * CCWs are PSF and RSSD
5120  */
5121 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
5122 {
5123         struct dasd_symmio_parms usrparm;
5124         char *psf_data, *rssd_result;
5125         struct dasd_ccw_req *cqr;
5126         struct ccw1 *ccw;
5127         char psf0, psf1;
5128         int rc;
5129
5130         if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
5131                 return -EACCES;
5132         psf0 = psf1 = 0;
5133
5134         /* Copy parms from caller */
5135         rc = -EFAULT;
5136         if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5137                 goto out;
5138         if (is_compat_task()) {
5139                 /* Make sure pointers are sane even on 31 bit. */
5140                 rc = -EINVAL;
5141                 if ((usrparm.psf_data >> 32) != 0)
5142                         goto out;
5143                 if ((usrparm.rssd_result >> 32) != 0)
5144                         goto out;
5145                 usrparm.psf_data &= 0x7fffffffULL;
5146                 usrparm.rssd_result &= 0x7fffffffULL;
5147         }
5148         /* at least 2 bytes are accessed and should be allocated */
5149         if (usrparm.psf_data_len < 2) {
5150                 DBF_DEV_EVENT(DBF_WARNING, device,
5151                               "Symmetrix ioctl invalid data length %d",
5152                               usrparm.psf_data_len);
5153                 rc = -EINVAL;
5154                 goto out;
5155         }
5156         /* alloc I/O data area */
5157         psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
5158         rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
5159         if (!psf_data || !rssd_result) {
5160                 rc = -ENOMEM;
5161                 goto out_free;
5162         }
5163
5164         /* get syscall header from user space */
5165         rc = -EFAULT;
5166         if (copy_from_user(psf_data,
5167                            (void __user *)(unsigned long) usrparm.psf_data,
5168                            usrparm.psf_data_len))
5169                 goto out_free;
5170         psf0 = psf_data[0];
5171         psf1 = psf_data[1];
5172
5173         /* setup CCWs for PSF + RSSD */
5174         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
5175         if (IS_ERR(cqr)) {
5176                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5177                         "Could not allocate initialization request");
5178                 rc = PTR_ERR(cqr);
5179                 goto out_free;
5180         }
5181
5182         cqr->startdev = device;
5183         cqr->memdev = device;
5184         cqr->retries = 3;
5185         cqr->expires = 10 * HZ;
5186         cqr->buildclk = get_tod_clock();
5187         cqr->status = DASD_CQR_FILLED;
5188
5189         /* Build the ccws */
5190         ccw = cqr->cpaddr;
5191
5192         /* PSF ccw */
5193         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5194         ccw->count = usrparm.psf_data_len;
5195         ccw->flags |= CCW_FLAG_CC;
5196         ccw->cda = (__u32)(addr_t) psf_data;
5197
5198         ccw++;
5199
5200         /* RSSD ccw  */
5201         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5202         ccw->count = usrparm.rssd_result_len;
5203         ccw->flags = CCW_FLAG_SLI ;
5204         ccw->cda = (__u32)(addr_t) rssd_result;
5205
5206         rc = dasd_sleep_on(cqr);
5207         if (rc)
5208                 goto out_sfree;
5209
5210         rc = -EFAULT;
5211         if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
5212                            rssd_result, usrparm.rssd_result_len))
5213                 goto out_sfree;
5214         rc = 0;
5215
5216 out_sfree:
5217         dasd_sfree_request(cqr, cqr->memdev);
5218 out_free:
5219         kfree(rssd_result);
5220         kfree(psf_data);
5221 out:
5222         DBF_DEV_EVENT(DBF_WARNING, device,
5223                       "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
5224                       (int) psf0, (int) psf1, rc);
5225         return rc;
5226 }
5227
5228 static int
5229 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5230 {
5231         struct dasd_device *device = block->base;
5232
5233         switch (cmd) {
5234         case BIODASDGATTR:
5235                 return dasd_eckd_get_attrib(device, argp);
5236         case BIODASDSATTR:
5237                 return dasd_eckd_set_attrib(device, argp);
5238         case BIODASDPSRD:
5239                 return dasd_eckd_performance(device, argp);
5240         case BIODASDRLSE:
5241                 return dasd_eckd_release(device);
5242         case BIODASDRSRV:
5243                 return dasd_eckd_reserve(device);
5244         case BIODASDSLCK:
5245                 return dasd_eckd_steal_lock(device);
5246         case BIODASDSNID:
5247                 return dasd_eckd_snid(device, argp);
5248         case BIODASDSYMMIO:
5249                 return dasd_symm_io(device, argp);
5250         default:
5251                 return -ENOTTY;
5252         }
5253 }
5254
5255 /*
5256  * Dump the range of CCWs into 'page' buffer
5257  * and return number of printed chars.
5258  */
5259 static int
5260 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
5261 {
5262         int len, count;
5263         char *datap;
5264
5265         len = 0;
5266         while (from <= to) {
5267                 len += sprintf(page + len, PRINTK_HEADER
5268                                " CCW %p: %08X %08X DAT:",
5269                                from, ((int *) from)[0], ((int *) from)[1]);
5270
5271                 /* get pointer to data (consider IDALs) */
5272                 if (from->flags & CCW_FLAG_IDA)
5273                         datap = (char *) *((addr_t *) (addr_t) from->cda);
5274                 else
5275                         datap = (char *) ((addr_t) from->cda);
5276
5277                 /* dump data (max 32 bytes) */
5278                 for (count = 0; count < from->count && count < 32; count++) {
5279                         if (count % 8 == 0) len += sprintf(page + len, " ");
5280                         if (count % 4 == 0) len += sprintf(page + len, " ");
5281                         len += sprintf(page + len, "%02x", datap[count]);
5282                 }
5283                 len += sprintf(page + len, "\n");
5284                 from++;
5285         }
5286         return len;
5287 }
5288
5289 static void
5290 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
5291                          char *reason)
5292 {
5293         u64 *sense;
5294         u64 *stat;
5295
5296         sense = (u64 *) dasd_get_sense(irb);
5297         stat = (u64 *) &irb->scsw;
5298         if (sense) {
5299                 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
5300                               "%016llx %016llx %016llx %016llx",
5301                               reason, *stat, *((u32 *) (stat + 1)),
5302                               sense[0], sense[1], sense[2], sense[3]);
5303         } else {
5304                 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
5305                               reason, *stat, *((u32 *) (stat + 1)),
5306                               "NO VALID SENSE");
5307         }
5308 }
5309
5310 /*
5311  * Print sense data and related channel program.
5312  * It is printed in parts because the printk buffer is only 1024 bytes.
5313  */
5314 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
5315                                  struct dasd_ccw_req *req, struct irb *irb)
5316 {
5317         char *page;
5318         struct ccw1 *first, *last, *fail, *from, *to;
5319         int len, sl, sct;
5320
5321         page = (char *) get_zeroed_page(GFP_ATOMIC);
5322         if (page == NULL) {
5323                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5324                               "No memory to dump sense data\n");
5325                 return;
5326         }
5327         /* dump the sense data */
5328         len = sprintf(page, PRINTK_HEADER
5329                       " I/O status report for device %s:\n",
5330                       dev_name(&device->cdev->dev));
5331         len += sprintf(page + len, PRINTK_HEADER
5332                        " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5333                        "CS:%02X RC:%d\n",
5334                        req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5335                        scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5336                        scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5337                        req ? req->intrc : 0);
5338         len += sprintf(page + len, PRINTK_HEADER
5339                        " device %s: Failing CCW: %p\n",
5340                        dev_name(&device->cdev->dev),
5341                        (void *) (addr_t) irb->scsw.cmd.cpa);
5342         if (irb->esw.esw0.erw.cons) {
5343                 for (sl = 0; sl < 4; sl++) {
5344                         len += sprintf(page + len, PRINTK_HEADER
5345                                        " Sense(hex) %2d-%2d:",
5346                                        (8 * sl), ((8 * sl) + 7));
5347
5348                         for (sct = 0; sct < 8; sct++) {
5349                                 len += sprintf(page + len, " %02x",
5350                                                irb->ecw[8 * sl + sct]);
5351                         }
5352                         len += sprintf(page + len, "\n");
5353                 }
5354
5355                 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
5356                         /* 24 Byte Sense Data */
5357                         sprintf(page + len, PRINTK_HEADER
5358                                 " 24 Byte: %x MSG %x, "
5359                                 "%s MSGb to SYSOP\n",
5360                                 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
5361                                 irb->ecw[1] & 0x10 ? "" : "no");
5362                 } else {
5363                         /* 32 Byte Sense Data */
5364                         sprintf(page + len, PRINTK_HEADER
5365                                 " 32 Byte: Format: %x "
5366                                 "Exception class %x\n",
5367                                 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
5368                 }
5369         } else {
5370                 sprintf(page + len, PRINTK_HEADER
5371                         " SORRY - NO VALID SENSE AVAILABLE\n");
5372         }
5373         printk(KERN_ERR "%s", page);
5374
5375         if (req) {
5376                 /* req == NULL for unsolicited interrupts */
5377                 /* dump the Channel Program (max 140 Bytes per line) */
5378                 /* Count CCW and print first CCWs (maximum 1024 / 140 = 7) */
5379                 first = req->cpaddr;
5380                 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
5381                 to = min(first + 6, last);
5382                 len = sprintf(page, PRINTK_HEADER
5383                               " Related CP in req: %p\n", req);
5384                 dasd_eckd_dump_ccw_range(first, to, page + len);
5385                 printk(KERN_ERR "%s", page);
5386
5387                 /* print failing CCW area (maximum 4) */
5388                 /* scsw->cpa is either valid or zero */
5389                 len = 0;
5390                 from = ++to;
5391                 fail = (struct ccw1 *)(addr_t)
5392                                 irb->scsw.cmd.cpa; /* failing CCW */
5393                 if (from < fail - 2) {
5394                         from = fail - 2;     /* there is a gap - print header */
5395                         len += sprintf(page, PRINTK_HEADER "......\n");
5396                 }
5397                 to = min(fail + 1, last);
5398                 len += dasd_eckd_dump_ccw_range(from, to, page + len);
5399
5400                 /* print last CCWs (maximum 2) */
5401                 from = max(from, ++to);
5402                 if (from < last - 1) {
5403                         from = last - 1;     /* there is a gap - print header */
5404                         len += sprintf(page + len, PRINTK_HEADER "......\n");
5405                 }
5406                 len += dasd_eckd_dump_ccw_range(from, last, page + len);
5407                 if (len > 0)
5408                         printk(KERN_ERR "%s", page);
5409         }
5410         free_page((unsigned long) page);
5411 }
5412
5413
5414 /*
5415  * Print sense data from a tcw.
5416  */
5417 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
5418                                  struct dasd_ccw_req *req, struct irb *irb)
5419 {
5420         char *page;
5421         int len, sl, sct, residual;
5422         struct tsb *tsb;
5423         u8 *sense, *rcq;
5424
5425         page = (char *) get_zeroed_page(GFP_ATOMIC);
5426         if (page == NULL) {
5427                 DBF_DEV_EVENT(DBF_WARNING, device, " %s",
5428                             "No memory to dump sense data");
5429                 return;
5430         }
5431         /* dump the sense data */
5432         len = sprintf(page, PRINTK_HEADER
5433                       " I/O status report for device %s:\n",
5434                       dev_name(&device->cdev->dev));
5435         len += sprintf(page + len, PRINTK_HEADER
5436                        " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5437                        "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
5438                        req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5439                        scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5440                        scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5441                        irb->scsw.tm.fcxs,
5442                        (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
5443                        req ? req->intrc : 0);
5444         len += sprintf(page + len, PRINTK_HEADER
5445                        " device %s: Failing TCW: %p\n",
5446                        dev_name(&device->cdev->dev),
5447                        (void *) (addr_t) irb->scsw.tm.tcw);
5448
5449         tsb = NULL;
5450         sense = NULL;
5451         if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
5452                 tsb = tcw_get_tsb(
5453                         (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
5454
5455         if (tsb) {
5456                 len += sprintf(page + len, PRINTK_HEADER
5457                                " tsb->length %d\n", tsb->length);
5458                 len += sprintf(page + len, PRINTK_HEADER
5459                                " tsb->flags %x\n", tsb->flags);
5460                 len += sprintf(page + len, PRINTK_HEADER
5461                                " tsb->dcw_offset %d\n", tsb->dcw_offset);
5462                 len += sprintf(page + len, PRINTK_HEADER
5463                                " tsb->count %d\n", tsb->count);
5464                 residual = tsb->count - 28;
5465                 len += sprintf(page + len, PRINTK_HEADER
5466                                " residual %d\n", residual);
5467
5468                 switch (tsb->flags & 0x07) {
5469                 case 1: /* tsa_iostat */
5470                         len += sprintf(page + len, PRINTK_HEADER
5471                                " tsb->tsa.iostat.dev_time %d\n",
5472                                        tsb->tsa.iostat.dev_time);
5473                         len += sprintf(page + len, PRINTK_HEADER
5474                                " tsb->tsa.iostat.def_time %d\n",
5475                                        tsb->tsa.iostat.def_time);
5476                         len += sprintf(page + len, PRINTK_HEADER
5477                                " tsb->tsa.iostat.queue_time %d\n",
5478                                        tsb->tsa.iostat.queue_time);
5479                         len += sprintf(page + len, PRINTK_HEADER
5480                                " tsb->tsa.iostat.dev_busy_time %d\n",
5481                                        tsb->tsa.iostat.dev_busy_time);
5482                         len += sprintf(page + len, PRINTK_HEADER
5483                                " tsb->tsa.iostat.dev_act_time %d\n",
5484                                        tsb->tsa.iostat.dev_act_time);
5485                         sense = tsb->tsa.iostat.sense;
5486                         break;
5487                 case 2: /* ts_ddpc */
5488                         len += sprintf(page + len, PRINTK_HEADER
5489                                " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
5490                         for (sl = 0; sl < 2; sl++) {
5491                                 len += sprintf(page + len, PRINTK_HEADER
5492                                                " tsb->tsa.ddpc.rcq %2d-%2d: ",
5493                                                (8 * sl), ((8 * sl) + 7));
5494                                 rcq = tsb->tsa.ddpc.rcq;
5495                                 for (sct = 0; sct < 8; sct++) {
5496                                         len += sprintf(page + len, " %02x",
5497                                                        rcq[8 * sl + sct]);
5498                                 }
5499                                 len += sprintf(page + len, "\n");
5500                         }
5501                         sense = tsb->tsa.ddpc.sense;
5502                         break;
5503                 case 3: /* tsa_intrg */
5504                         len += sprintf(page + len, PRINTK_HEADER
5505                                       " tsb->tsa.intrg.: not supported yet\n");
5506                         break;
5507                 }
5508
5509                 if (sense) {
5510                         for (sl = 0; sl < 4; sl++) {
5511                                 len += sprintf(page + len, PRINTK_HEADER
5512                                                " Sense(hex) %2d-%2d:",
5513                                                (8 * sl), ((8 * sl) + 7));
5514                                 for (sct = 0; sct < 8; sct++) {
5515                                         len += sprintf(page + len, " %02x",
5516                                                        sense[8 * sl + sct]);
5517                                 }
5518                                 len += sprintf(page + len, "\n");
5519                         }
5520
5521                         if (sense[27] & DASD_SENSE_BIT_0) {
5522                                 /* 24 Byte Sense Data */
5523                                 sprintf(page + len, PRINTK_HEADER
5524                                         " 24 Byte: %x MSG %x, "
5525                                         "%s MSGb to SYSOP\n",
5526                                         sense[7] >> 4, sense[7] & 0x0f,
5527                                         sense[1] & 0x10 ? "" : "no");
5528                         } else {
5529                                 /* 32 Byte Sense Data */
5530                                 sprintf(page + len, PRINTK_HEADER
5531                                         " 32 Byte: Format: %x "
5532                                         "Exception class %x\n",
5533                                         sense[6] & 0x0f, sense[22] >> 4);
5534                         }
5535                 } else {
5536                         sprintf(page + len, PRINTK_HEADER
5537                                 " SORRY - NO VALID SENSE AVAILABLE\n");
5538                 }
5539         } else {
5540                 sprintf(page + len, PRINTK_HEADER
5541                         " SORRY - NO TSB DATA AVAILABLE\n");
5542         }
5543         printk(KERN_ERR "%s", page);
5544         free_page((unsigned long) page);
5545 }
5546
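/*
 * Dump the sense data and related channel program of a failed request.
 * Transport mode (tcw) and command mode (ccw) requests are handled by
 * their respective helpers; errors that are expected and have the
 * matching suppress flag set are not logged at all.
 */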
5547 static void dasd_eckd_dump_sense(struct dasd_device *device,
5548                                  struct dasd_ccw_req *req, struct irb *irb)
5549 {
5550         u8 *sense = dasd_get_sense(irb);
5551
5552         if (scsw_is_tm(&irb->scsw)) {
5553                 /*
5554                  * In some cases the 'File Protected' or 'Incorrect Length'
5555                  * error might be expected and log messages shouldn't be written
5556                  * in that case. Check if the corresponding suppress bit is set.
5557                  */
5558                 if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
5559                     test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
5560                         return;
5561                 if (scsw_cstat(&irb->scsw) == 0x40 &&
5562                     test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5563                         return;
5564
5565                 dasd_eckd_dump_sense_tcw(device, req, irb);
5566         } else {
5567                 /*
5568                  * In some cases the 'Command Reject' or 'No Record Found'
5569                  * error might be expected and log messages shouldn't be written
5570                  * in that case. Check if the corresponding suppress bit is set.
5571                  */
5572                 if (sense && sense[0] & SNS0_CMD_REJECT &&
5573                     test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5574                         return;
5575
5576                 if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5577                     test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5578                         return;
5579
5580                 dasd_eckd_dump_sense_ccw(device, req, irb);
5581         }
5582 }
5583
5584 static int dasd_eckd_pm_freeze(struct dasd_device *device)
5585 {
5586         /*
5587          * The device should be disconnected from our LCU structure;
5588          * on restore we will reconnect it and reread LCU-specific
5589          * information like PAV support that might have changed.
5590          */
5591         dasd_alias_remove_device(device);
5592         dasd_alias_disconnect_device_from_lcu(device);
5593
5594         return 0;
5595 }
5596
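/*
 * Restore the device after a suspend/resume cycle: reread the
 * configuration data, check that the UID did not change, reconnect the
 * device to its LCU and refresh the feature, volume, extent pool and
 * device characteristics information.
 */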
5597 static int dasd_eckd_restore_device(struct dasd_device *device)
5598 {
5599         struct dasd_eckd_private *private = device->private;
5600         struct dasd_eckd_characteristics temp_rdc_data;
5601         int rc;
5602         struct dasd_uid temp_uid;
5603         unsigned long flags;
5604         unsigned long cqr_flags = 0;
5605
5606         /* Read Configuration Data */
5607         rc = dasd_eckd_read_conf(device);
5608         if (rc) {
5609                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5610                                 "Read configuration data failed, rc=%d", rc);
5611                 goto out_err;
5612         }
5613
5614         dasd_eckd_get_uid(device, &temp_uid);
5615         /* Generate device unique id */
5616         rc = dasd_eckd_generate_uid(device);
5617         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5618         if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
5619                 dev_err(&device->cdev->dev, "The UID of the DASD has "
5620                         "changed\n");
5621         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5622         if (rc)
5623                 goto out_err;
5624
5625         /* register lcu with alias handling, enable PAV if this is a new lcu */
5626         rc = dasd_alias_make_device_known_to_lcu(device);
5627         if (rc)
5628                 goto out_err;
5629
5630         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
5631         dasd_eckd_validate_server(device, cqr_flags);
5632
5633         /* RE-Read Configuration Data */
5634         rc = dasd_eckd_read_conf(device);
5635         if (rc) {
5636                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5637                         "Read configuration data failed, rc=%d", rc);
5638                 goto out_err2;
5639         }
5640
5641         /* Read Feature Codes */
5642         dasd_eckd_read_features(device);
5643
5644         /* Read Volume Information */
5645         dasd_eckd_read_vol_info(device);
5646
5647         /* Read Extent Pool Information */
5648         dasd_eckd_read_ext_pool_info(device);
5649
5650         /* Read Device Characteristics */
5651         rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
5652                                          &temp_rdc_data, 64);
5653         if (rc) {
5654                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5655                                 "Read device characteristic failed, rc=%d", rc);
5656                 goto out_err2;
5657         }
5658         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5659         memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
5660         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5661
5662         /* add device to alias management */
5663         dasd_alias_add_device(device);
5664
5665         return 0;
5666
5667 out_err2:
5668         dasd_alias_disconnect_device_from_lcu(device);
5669 out_err:
5670         return -1;
5671 }
5672
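/*
 * Reread the configuration data and update the alias relationship of
 * the device. If the base unit address has changed, report the UID of
 * the new base device the alias was reassigned to.
 */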
5673 static int dasd_eckd_reload_device(struct dasd_device *device)
5674 {
5675         struct dasd_eckd_private *private = device->private;
5676         int rc, old_base;
5677         char print_uid[60];
5678         struct dasd_uid uid;
5679         unsigned long flags;
5680
5681         /*
5682          * remove device from alias handling to prevent new requests
5683          * from being scheduled on the wrong alias device
5684          */
5685         dasd_alias_remove_device(device);
5686
5687         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5688         old_base = private->uid.base_unit_addr;
5689         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5690
5691         /* Read Configuration Data */
5692         rc = dasd_eckd_read_conf(device);
5693         if (rc)
5694                 goto out_err;
5695
5696         rc = dasd_eckd_generate_uid(device);
5697         if (rc)
5698                 goto out_err;
5699         /*
5700          * update unit address configuration and
5701          * add device to alias management
5702          */
5703         dasd_alias_update_add_device(device);
5704
5705         dasd_eckd_get_uid(device, &uid);
5706
5707         if (old_base != uid.base_unit_addr) {
5708                 if (strlen(uid.vduit) > 0)
5709                         snprintf(print_uid, sizeof(print_uid),
5710                                  "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
5711                                  uid.ssid, uid.base_unit_addr, uid.vduit);
5712                 else
5713                         snprintf(print_uid, sizeof(print_uid),
5714                                  "%s.%s.%04x.%02x", uid.vendor, uid.serial,
5715                                  uid.ssid, uid.base_unit_addr);
5716
5717                 dev_info(&device->cdev->dev,
5718                          "An Alias device was reassigned to a new base device "
5719                          "with UID: %s\n", print_uid);
5720         }
5721         return 0;
5722
5723 out_err:
5724         return -1;
5725 }
5726
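/*
 * Read the attention message buffer for the given path (lpum) with a
 * PSF/RSSD channel program. If I/O on the requested path fails, retry
 * once with an open path mask because the data may be available on any
 * path (e.g. under z/VM).
 */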
5727 static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5728                                          struct dasd_rssd_messages *messages,
5729                                          __u8 lpum)
5730 {
5731         struct dasd_rssd_messages *message_buf;
5732         struct dasd_psf_prssd_data *prssdp;
5733         struct dasd_ccw_req *cqr;
5734         struct ccw1 *ccw;
5735         int rc;
5736
5737         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5738                                    (sizeof(struct dasd_psf_prssd_data) +
5739                                     sizeof(struct dasd_rssd_messages)),
5740                                    device, NULL);
5741         if (IS_ERR(cqr)) {
5742                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5743                                 "Could not allocate read message buffer request");
5744                 return PTR_ERR(cqr);
5745         }
5746
5747         cqr->lpm = lpum;
5748 retry:
5749         cqr->startdev = device;
5750         cqr->memdev = device;
5751         cqr->block = NULL;
5752         cqr->expires = 10 * HZ;
5753         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5754         /* dasd_sleep_on_immediatly does not do complex error
5755          * recovery, so clear the erp flag and set the retry counter
5756          * to do basic erp */
5757         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5758         cqr->retries = 256;
5759
5760         /* Prepare for Read Subsystem Data */
5761         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5762         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5763         prssdp->order = PSF_ORDER_PRSSD;
5764         prssdp->suborder = 0x03;        /* Message Buffer */
5765         /* all other bytes of prssdp must be zero */
5766
5767         ccw = cqr->cpaddr;
5768         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5769         ccw->count = sizeof(struct dasd_psf_prssd_data);
5770         ccw->flags |= CCW_FLAG_CC;
5771         ccw->flags |= CCW_FLAG_SLI;
5772         ccw->cda = (__u32)(addr_t) prssdp;
5773
5774         /* Read Subsystem Data - message buffer */
5775         message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
5776         memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
5777
5778         ccw++;
5779         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5780         ccw->count = sizeof(struct dasd_rssd_messages);
5781         ccw->flags |= CCW_FLAG_SLI;
5782         ccw->cda = (__u32)(addr_t) message_buf;
5783
5784         cqr->buildclk = get_tod_clock();
5785         cqr->status = DASD_CQR_FILLED;
5786         rc = dasd_sleep_on_immediatly(cqr);
5787         if (rc == 0) {
5788                 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5789                 message_buf = (struct dasd_rssd_messages *)
5790                         (prssdp + 1);
5791                 memcpy(messages, message_buf,
5792                        sizeof(struct dasd_rssd_messages));
5793         } else if (cqr->lpm) {
5794                 /*
5795                  * On z/VM we might not be able to do I/O on the requested path,
5796                  * but we can get the required information on any path,
5797                  * so retry with an open path mask.
5798                  */
5799                 cqr->lpm = 0;
5800                 goto retry;
5801         } else
5802                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5803                                 "Reading messages failed with rc=%d\n"
5804                                 , rc);
5805         dasd_sfree_request(cqr, cqr->memdev);
5806         return rc;
5807 }
5808
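/*
 * Query the storage server for host access information of this volume
 * (PSF/RSSD with the query host access suborder). Not available for
 * HyperPAV alias devices; returns -EOPNOTSUPP if the storage server
 * does not support the command.
 */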
5809 static int dasd_eckd_query_host_access(struct dasd_device *device,
5810                                        struct dasd_psf_query_host_access *data)
5811 {
5812         struct dasd_eckd_private *private = device->private;
5813         struct dasd_psf_query_host_access *host_access;
5814         struct dasd_psf_prssd_data *prssdp;
5815         struct dasd_ccw_req *cqr;
5816         struct ccw1 *ccw;
5817         int rc;
5818
5819         /* not available for HYPER PAV alias devices */
5820         if (!device->block && private->lcu->pav == HYPER_PAV)
5821                 return -EOPNOTSUPP;
5822
5823         /* may not be supported by the storage server */
5824         if (!(private->features.feature[14] & 0x80))
5825                 return -EOPNOTSUPP;
5826
5827         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5828                                    sizeof(struct dasd_psf_prssd_data) + 1,
5829                                    device, NULL);
5830         if (IS_ERR(cqr)) {
5831                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5832                                 "Could not allocate read message buffer request");
5833                 return PTR_ERR(cqr);
5834         }
5835         host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
5836         if (!host_access) {
5837                 dasd_sfree_request(cqr, device);
5838                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5839                                 "Could not allocate host_access buffer");
5840                 return -ENOMEM;
5841         }
5842         cqr->startdev = device;
5843         cqr->memdev = device;
5844         cqr->block = NULL;
5845         cqr->retries = 256;
5846         cqr->expires = 10 * HZ;
5847
5848         /* Prepare for Read Subsystem Data */
5849         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5850         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5851         prssdp->order = PSF_ORDER_PRSSD;
5852         prssdp->suborder = PSF_SUBORDER_QHA;    /* query host access */
5853         /* LSS and Volume that will be queried */
5854         prssdp->lss = private->ned->ID;
5855         prssdp->volume = private->ned->unit_addr;
5856         /* all other bytes of prssdp must be zero */
5857
5858         ccw = cqr->cpaddr;
5859         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5860         ccw->count = sizeof(struct dasd_psf_prssd_data);
5861         ccw->flags |= CCW_FLAG_CC;
5862         ccw->flags |= CCW_FLAG_SLI;
5863         ccw->cda = (__u32)(addr_t) prssdp;
5864
5865         /* Read Subsystem Data - query host access */
5866         ccw++;
5867         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5868         ccw->count = sizeof(struct dasd_psf_query_host_access);
5869         ccw->flags |= CCW_FLAG_SLI;
5870         ccw->cda = (__u32)(addr_t) host_access;
5871
5872         cqr->buildclk = get_tod_clock();
5873         cqr->status = DASD_CQR_FILLED;
5874         /* the command might not be supported, suppress error message */
5875         __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
5876         rc = dasd_sleep_on_interruptible(cqr);
5877         if (rc == 0) {
5878                 *data = *host_access;
5879         } else {
5880                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5881                                 "Reading host access data failed with rc=%d\n",
5882                                 rc);
5883                 rc = -EOPNOTSUPP;
5884         }
5885
5886         dasd_sfree_request(cqr, cqr->memdev);
5887         kfree(host_access);
5888         return rc;
5889 }
5890 /*
5891  * return number of grouped devices
5892  */
5893 static int dasd_eckd_host_access_count(struct dasd_device *device)
5894 {
5895         struct dasd_psf_query_host_access *access;
5896         struct dasd_ckd_path_group_entry *entry;
5897         struct dasd_ckd_host_information *info;
5898         int count = 0;
5899         int rc, i;
5900
5901         access = kzalloc(sizeof(*access), GFP_NOIO);
5902         if (!access) {
5903                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5904                                 "Could not allocate access buffer");
5905                 return -ENOMEM;
5906         }
5907         rc = dasd_eckd_query_host_access(device, access);
5908         if (rc) {
5909                 kfree(access);
5910                 return rc;
5911         }
5912
5913         info = (struct dasd_ckd_host_information *)
5914                 access->host_access_information;
5915         for (i = 0; i < info->entry_count; i++) {
5916                 entry = (struct dasd_ckd_path_group_entry *)
5917                         (info->entry + i * info->entry_size);
5918                 if (entry->status_flags & DASD_ECKD_PG_GROUPED)
5919                         count++;
5920         }
5921
5922         kfree(access);
5923         return count;
5924 }
5925
5926 /*
5927  * write host access information to a sequential file
5928  */
5929 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
5930 {
5931         struct dasd_psf_query_host_access *access;
5932         struct dasd_ckd_path_group_entry *entry;
5933         struct dasd_ckd_host_information *info;
5934         char sysplex[9] = "";
5935         int rc, i;
5936
5937         access = kzalloc(sizeof(*access), GFP_NOIO);
5938         if (!access) {
5939                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5940                                 "Could not allocate access buffer");
5941                 return -ENOMEM;
5942         }
5943         rc = dasd_eckd_query_host_access(device, access);
5944         if (rc) {
5945                 kfree(access);
5946                 return rc;
5947         }
5948
5949         info = (struct dasd_ckd_host_information *)
5950                 access->host_access_information;
5951         for (i = 0; i < info->entry_count; i++) {
5952                 entry = (struct dasd_ckd_path_group_entry *)
5953                         (info->entry + i * info->entry_size);
5954                 /* PGID */
5955                 seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
5956                 /* FLAGS */
5957                 seq_printf(m, "status_flags %02x\n", entry->status_flags);
5958                 /* SYSPLEX NAME */
5959                 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
5960                 EBCASC(sysplex, sizeof(sysplex));
5961                 seq_printf(m, "sysplex_name %8s\n", sysplex);
5962                 /* SUPPORTED CYLINDER */
5963                 seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
5964                 /* TIMESTAMP */
5965                 seq_printf(m, "timestamp %lu\n", (unsigned long)
5966                            entry->timestamp);
5967         }
5968         kfree(access);
5969
5970         return 0;
5971 }
5972
5973 /*
5974  * Perform Subsystem Function - CUIR response
5975  */
5976 static int
5977 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
5978                             __u32 message_id, __u8 lpum)
5979 {
5980         struct dasd_psf_cuir_response *psf_cuir;
5981         int pos = pathmask_to_pos(lpum);
5982         struct dasd_ccw_req *cqr;
5983         struct ccw1 *ccw;
5984         int rc;
5985
5986         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
5987                                    sizeof(struct dasd_psf_cuir_response),
5988                                    device, NULL);
5989
5990         if (IS_ERR(cqr)) {
5991                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5992                            "Could not allocate PSF-CUIR request");
5993                 return PTR_ERR(cqr);
5994         }
5995
5996         psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
5997         psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
5998         psf_cuir->cc = response;
5999         psf_cuir->chpid = device->path[pos].chpid;
6000         psf_cuir->message_id = message_id;
6001         psf_cuir->cssid = device->path[pos].cssid;
6002         psf_cuir->ssid = device->path[pos].ssid;
6003         ccw = cqr->cpaddr;
6004         ccw->cmd_code = DASD_ECKD_CCW_PSF;
6005         ccw->cda = (__u32)(addr_t)psf_cuir;
6006         ccw->flags = CCW_FLAG_SLI;
6007         ccw->count = sizeof(struct dasd_psf_cuir_response);
6008
6009         cqr->startdev = device;
6010         cqr->memdev = device;
6011         cqr->block = NULL;
6012         cqr->retries = 256;
6013         cqr->expires = 10*HZ;
6014         cqr->buildclk = get_tod_clock();
6015         cqr->status = DASD_CQR_FILLED;
6016         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6017
6018         rc = dasd_sleep_on(cqr);
6019
6020         dasd_sfree_request(cqr, cqr->memdev);
6021         return rc;
6022 }
6023
6024 /*
6025  * Return the configuration data that is referenced by the record selector
6026  * if a record selector is specified; otherwise return the conf_data
6027  * pointer for the path specified by lpum.
6028  */
6029 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6030                                                      __u8 lpum,
6031                                                      struct dasd_cuir_message *cuir)
6032 {
6033         struct dasd_conf_data *conf_data;
6034         int path, pos;
6035
6036         if (cuir->record_selector == 0)
6037                 goto out;
6038         for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6039                 conf_data = device->path[pos].conf_data;
6040                 if (conf_data->gneq.record_selector ==
6041                     cuir->record_selector)
6042                         return conf_data;
6043         }
6044 out:
6045         return device->path[pathmask_to_pos(lpum)].conf_data;
6046 }
6047
6048 /*
6049  * This function determines the scope of a reconfiguration request by
6050  * analysing the path and device selection data provided in the CUIR request.
6051  * Returns a path mask containing the CUIR-affected paths for the given device.
6052  *
6053  * If the CUIR request does not contain the required information, return the
6054  * path mask of the path the attention message for the CUIR request was
6055  * received on.
6056  */
6057 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
6058                                 struct dasd_cuir_message *cuir)
6059 {
6060         struct dasd_conf_data *ref_conf_data;
6061         unsigned long bitmask = 0, mask = 0;
6062         struct dasd_conf_data *conf_data;
6063         unsigned int pos, path;
6064         char *ref_gneq, *gneq;
6065         char *ref_ned, *ned;
6066         int tbcpm = 0;
6067
6068         /* if the CUIR request does not specify the scope, use the path
6069            the attention message was presented on */
6070         if (!cuir->ned_map ||
6071             !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
6072                 return lpum;
6073
6074         /* get reference conf data */
6075         ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
6076         /* reference ned is determined by ned_map field */
6077         pos = 8 - ffs(cuir->ned_map);
6078         ref_ned = (char *)&ref_conf_data->neds[pos];
6079         ref_gneq = (char *)&ref_conf_data->gneq;
6080         /* transfer 24 bit neq_map to mask */
6081         mask = cuir->neq_map[2];
6082         mask |= cuir->neq_map[1] << 8;
6083         mask |= cuir->neq_map[0] << 16;
6084
6085         for (path = 0; path < 8; path++) {
6086                 /* initialise data per path */
6087                 bitmask = mask;
6088                 conf_data = device->path[path].conf_data;
6089                 pos = 8 - ffs(cuir->ned_map);
6090                 ned = (char *) &conf_data->neds[pos];
6091                 /* compare reference ned and per path ned */
6092                 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
6093                         continue;
6094                 gneq = (char *)&conf_data->gneq;
6095                 /* compare reference gneq and per-path gneq under the
6096                    24 bit mask, where the least significant mask bit
6097                    selects gneq byte 31 and mask bit 23 selects byte 8 */
6098                 while (bitmask) {
6099                         pos = ffs(bitmask) - 1;
6100                         if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
6101                             != 0)
6102                                 break;
6103                         clear_bit(pos, &bitmask);
6104                 }
6105                 if (bitmask)
6106                         continue;
6107                 /* device and path match the reference values
6108                    add path to CUIR scope */
6109                 tbcpm |= 0x80 >> path;
6110         }
6111         return tbcpm;
6112 }
6113
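/*
 * Inform the user about every channel path affected by the CUIR action,
 * one message per quiesced or resumed path.
 */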
6114 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6115                                        unsigned long paths, int action)
6116 {
6117         int pos;
6118
6119         while (paths) {
6120                 /* get position of bit in mask */
6121                 pos = 8 - ffs(paths);
6122                 /* print a notification for the path at this position */
6123                 if (action == CUIR_QUIESCE)
6124                         pr_warn("Service on the storage server caused path %x.%02x to go offline\n",
6125                                 device->path[pos].cssid,
6126                                 device->path[pos].chpid);
6127                 else if (action == CUIR_RESUME)
6128                         pr_info("Path %x.%02x is back online after service on the storage server\n",
6129                                 device->path[pos].cssid,
6130                                 device->path[pos].chpid);
6131                 clear_bit(7 - pos, &paths);
6132         }
6133 }
6134
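/*
 * Remove the CUIR-affected paths from the operational path mask of a
 * device. Returns the removed path mask, 0 if none of the paths is in
 * use, or -EINVAL if no operational path would be left.
 */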
6135 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
6136                                       struct dasd_cuir_message *cuir)
6137 {
6138         unsigned long tbcpm;
6139
6140         tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
6141         /* nothing to do if path is not in use */
6142         if (!(dasd_path_get_opm(device) & tbcpm))
6143                 return 0;
6144         if (!(dasd_path_get_opm(device) & ~tbcpm)) {
6145                 /* no path would be left if the CUIR action were taken,
6146                    return an error */
6147                 return -EINVAL;
6148         }
6149         /* remove device from operational path mask */
6150         dasd_path_remove_opm(device, tbcpm);
6151         dasd_path_add_cuirpm(device, tbcpm);
6152         return tbcpm;
6153 }
6154
6155 /*
6156  * Walk through all devices and build a path mask to quiesce them;
6157  * return an error if the last path to a device would be removed.
6158  *
6159  * If only part of the devices are quiesced when an error occurs, no
6160  * resume is necessary: the storage server will notify the devices
6161  * that are already set offline again.
6162  */
6163 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
6164                                   struct dasd_cuir_message *cuir)
6165 {
6166         struct dasd_eckd_private *private = device->private;
6167         struct alias_pav_group *pavgroup, *tempgroup;
6168         struct dasd_device *dev, *n;
6169         unsigned long paths = 0;
6170         unsigned long flags;
6171         int tbcpm;
6172
6173         /* active devices */
6174         list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6175                                  alias_list) {
6176                 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6177                 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6178                 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6179                 if (tbcpm < 0)
6180                         goto out_err;
6181                 paths |= tbcpm;
6182         }
6183         /* inactive devices */
6184         list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6185                                  alias_list) {
6186                 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6187                 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6188                 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6189                 if (tbcpm < 0)
6190                         goto out_err;
6191                 paths |= tbcpm;
6192         }
6193         /* devices in PAV groups */
6194         list_for_each_entry_safe(pavgroup, tempgroup,
6195                                  &private->lcu->grouplist, group) {
6196                 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6197                                          alias_list) {
6198                         spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6199                         tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6200                         spin_unlock_irqrestore(
6201                                 get_ccwdev_lock(dev->cdev), flags);
6202                         if (tbcpm < 0)
6203                                 goto out_err;
6204                         paths |= tbcpm;
6205                 }
6206                 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6207                                          alias_list) {
6208                         spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6209                         tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6210                         spin_unlock_irqrestore(
6211                                 get_ccwdev_lock(dev->cdev), flags);
6212                         if (tbcpm < 0)
6213                                 goto out_err;
6214                         paths |= tbcpm;
6215                 }
6216         }
6217         /* notify user about all paths affected by CUIR action */
6218         dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
6219         return 0;
6220 out_err:
6221         return tbcpm;
6222 }
6223
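/*
 * Walk through all devices of the LCU and trigger path verification for
 * every CUIR-affected path that is not already in use again.
 */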
6224 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
6225                                  struct dasd_cuir_message *cuir)
6226 {
6227         struct dasd_eckd_private *private = device->private;
6228         struct alias_pav_group *pavgroup, *tempgroup;
6229         struct dasd_device *dev, *n;
6230         unsigned long paths = 0;
6231         int tbcpm;
6232
6233         /*
6234          * The path may have been added through a generic path event before;
6235          * only trigger path verification if the path is not already in use.
6236          */
6237         list_for_each_entry_safe(dev, n,
6238                                  &private->lcu->active_devices,
6239                                  alias_list) {
6240                 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6241                 paths |= tbcpm;
6242                 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6243                         dasd_path_add_tbvpm(dev, tbcpm);
6244                         dasd_schedule_device_bh(dev);
6245                 }
6246         }
6247         list_for_each_entry_safe(dev, n,
6248                                  &private->lcu->inactive_devices,
6249                                  alias_list) {
6250                 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6251                 paths |= tbcpm;
6252                 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6253                         dasd_path_add_tbvpm(dev, tbcpm);
6254                         dasd_schedule_device_bh(dev);
6255                 }
6256         }
6257         /* devices in PAV groups */
6258         list_for_each_entry_safe(pavgroup, tempgroup,
6259                                  &private->lcu->grouplist,
6260                                  group) {
6261                 list_for_each_entry_safe(dev, n,
6262                                          &pavgroup->baselist,
6263                                          alias_list) {
6264                         tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6265                         paths |= tbcpm;
6266                         if (!(dasd_path_get_opm(dev) & tbcpm)) {
6267                                 dasd_path_add_tbvpm(dev, tbcpm);
6268                                 dasd_schedule_device_bh(dev);
6269                         }
6270                 }
6271                 list_for_each_entry_safe(dev, n,
6272                                          &pavgroup->aliaslist,
6273                                          alias_list) {
6274                         tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6275                         paths |= tbcpm;
6276                         if (!(dasd_path_get_opm(dev) & tbcpm)) {
6277                                 dasd_path_add_tbvpm(dev, tbcpm);
6278                                 dasd_schedule_device_bh(dev);
6279                         }
6280                 }
6281         }
6282         /* notify user about all paths affected by CUIR action */
6283         dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
6284         return 0;
6285 }
6286
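/*
 * Handle an incoming CUIR (Control Unit Initiated Reconfiguration)
 * message: quiesce or resume the affected paths and send the matching
 * PSF response back to the storage server.
 */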
6287 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
6288                                  __u8 lpum)
6289 {
6290         struct dasd_cuir_message *cuir = messages;
6291         int response;
6292
6293         DBF_DEV_EVENT(DBF_WARNING, device,
6294                       "CUIR request: %016llx %016llx %016llx %08x",
6295                       ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
6296                       ((u32 *)cuir)[3]);
6297
6298         if (cuir->code == CUIR_QUIESCE) {
6299                 /* quiesce */
6300                 if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
6301                         response = PSF_CUIR_LAST_PATH;
6302                 else
6303                         response = PSF_CUIR_COMPLETED;
6304         } else if (cuir->code == CUIR_RESUME) {
6305                 /* resume */
6306                 dasd_eckd_cuir_resume(device, lpum, cuir);
6307                 response = PSF_CUIR_COMPLETED;
6308         } else
6309                 response = PSF_CUIR_NOT_SUPPORTED;
6310
6311         dasd_eckd_psf_cuir_response(device, response,
6312                                     cuir->message_id, lpum);
6313         DBF_DEV_EVENT(DBF_WARNING, device,
6314                       "CUIR response: %d on message ID %08x", response,
6315                       cuir->message_id);
6316         /* to make sure there is no attention left, schedule work again */
6317         device->discipline->check_attention(device, lpum);
6318 }
6319
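/*
 * Signal that extent pool space is available again to all devices of
 * the LCU that were stopped because the pool had run out of space.
 */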
6320 static void dasd_eckd_oos_resume(struct dasd_device *device)
6321 {
6322         struct dasd_eckd_private *private = device->private;
6323         struct alias_pav_group *pavgroup, *tempgroup;
6324         struct dasd_device *dev, *n;
6325         unsigned long flags;
6326
6327         spin_lock_irqsave(&private->lcu->lock, flags);
6328         list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6329                                  alias_list) {
6330                 if (dev->stopped & DASD_STOPPED_NOSPC)
6331                         dasd_generic_space_avail(dev);
6332         }
6333         list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6334                                  alias_list) {
6335                 if (dev->stopped & DASD_STOPPED_NOSPC)
6336                         dasd_generic_space_avail(dev);
6337         }
6338         /* devices in PAV groups */
6339         list_for_each_entry_safe(pavgroup, tempgroup,
6340                                  &private->lcu->grouplist,
6341                                  group) {
6342                 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6343                                          alias_list) {
6344                         if (dev->stopped & DASD_STOPPED_NOSPC)
6345                                 dasd_generic_space_avail(dev);
6346                 }
6347                 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6348                                          alias_list) {
6349                         if (dev->stopped & DASD_STOPPED_NOSPC)
6350                                 dasd_generic_space_avail(dev);
6351                 }
6352         }
6353         spin_unlock_irqrestore(&private->lcu->lock, flags);
6354 }
6355
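/*
 * Handle an out-of-space attention message for the extent pool: warn or
 * inform the user depending on the reported state and refresh the
 * extent pool information afterwards.
 */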
6356 static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
6357                                  __u8 lpum)
6358 {
6359         struct dasd_oos_message *oos = messages;
6360
6361         switch (oos->code) {
6362         case REPO_WARN:
6363         case POOL_WARN:
6364                 dev_warn(&device->cdev->dev,
6365                          "Extent pool usage has reached a critical value\n");
6366                 dasd_eckd_oos_resume(device);
6367                 break;
6368         case REPO_EXHAUST:
6369         case POOL_EXHAUST:
6370                 dev_warn(&device->cdev->dev,
6371                          "Extent pool is exhausted\n");
6372                 break;
6373         case REPO_RELIEVE:
6374         case POOL_RELIEVE:
6375                 dev_info(&device->cdev->dev,
6376                          "Extent pool physical space constraint has been relieved\n");
6377                 break;
6378         }
6379
6380         /* In any case, update related data */
6381         dasd_eckd_read_ext_pool_info(device);
6382
6383         /* to make sure there is no attention left, schedule work again */
6384         device->discipline->check_attention(device, lpum);
6385 }
6386
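/*
 * Worker function: read the message buffer for the path that raised the
 * attention interrupt and dispatch CUIR or out-of-space messages to the
 * corresponding handler.
 */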
6387 static void dasd_eckd_check_attention_work(struct work_struct *work)
6388 {
6389         struct check_attention_work_data *data;
6390         struct dasd_rssd_messages *messages;
6391         struct dasd_device *device;
6392         int rc;
6393
6394         data = container_of(work, struct check_attention_work_data, worker);
6395         device = data->device;
6396         messages = kzalloc(sizeof(*messages), GFP_KERNEL);
6397         if (!messages) {
6398                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6399                               "Could not allocate attention message buffer");
6400                 goto out;
6401         }
6402         rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
6403         if (rc)
6404                 goto out;
6405
6406         if (messages->length == ATTENTION_LENGTH_CUIR &&
6407             messages->format == ATTENTION_FORMAT_CUIR)
6408                 dasd_eckd_handle_cuir(device, messages, data->lpum);
6409         if (messages->length == ATTENTION_LENGTH_OOS &&
6410             messages->format == ATTENTION_FORMAT_OOS)
6411                 dasd_eckd_handle_oos(device, messages, data->lpum);
6412
6413 out:
6414         dasd_put_device(device);
6415         kfree(messages);
6416         kfree(data);
6417 }
6418
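/*
 * Defer attention handling to a worker; reading the message buffer
 * performs blocking channel I/O and must not be done in the caller's
 * (possibly atomic) context.
 */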
6419 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6420 {
6421         struct check_attention_work_data *data;
6422
6423         data = kzalloc(sizeof(*data), GFP_ATOMIC);
6424         if (!data)
6425                 return -ENOMEM;
6426         INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6427         dasd_get_device(device);
6428         data->device = device;
6429         data->lpum = lpum;
6430         schedule_work(&data->worker);
6431         return 0;
6432 }
6433
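/*
 * Disable High Performance FICON for a single path: if other operational
 * paths remain, move the path from the operational path mask to the
 * no-HPF path mask and return 1; otherwise leave it alone and return 0.
 */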
6434 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6435 {
6436         if (~lpum & dasd_path_get_opm(device)) {
6437                 dasd_path_add_nohpfpm(device, lpum);
6438                 dasd_path_remove_opm(device, lpum);
6439                 dev_err(&device->cdev->dev,
6440                         "Channel path %02X lost HPF functionality and is disabled\n",
6441                         lpum);
6442                 return 1;
6443         }
6444         return 0;
6445 }
6446
6447 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6448 {
6449         struct dasd_eckd_private *private = device->private;
6450
6451         dev_err(&device->cdev->dev,
6452                 "High Performance FICON disabled\n");
6453         private->fcx_max_data = 0;
6454 }
6455
6456 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6457 {
6458         struct dasd_eckd_private *private = device->private;
6459
6460         return private->fcx_max_data ? 1 : 0;
6461 }
6462
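/*
 * React to an HPF error reported by the channel subsystem: disable HPF
 * for the affected path or for the whole device, stop new I/O and
 * schedule a requeue of the existing requests.
 */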
6463 static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
6464                                        struct irb *irb)
6465 {
6466         struct dasd_eckd_private *private = device->private;
6467
6468         if (!private->fcx_max_data) {
6469                 /* sanity check for no HPF, the error makes no sense */
6470                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6471                               "Trying to disable HPF for a non HPF device");
6472                 return;
6473         }
6474         if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
6475                 dasd_eckd_disable_hpf_device(device);
6476         } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
6477                 if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
6478                         return;
6479                 dasd_eckd_disable_hpf_device(device);
6480                 dasd_path_set_tbvpm(device,
6481                                   dasd_path_get_hpfpm(device));
6482         }
6483         /*
6484          * prevent any new I/O from being started on the device and schedule
6485          * a requeue of the existing requests
6486          */
6487         dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
6488         dasd_schedule_requeue(device);
6489 }
6490
6491 /*
6492  * Initialize block layer request queue.
6493  */
6494 static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
6495 {
6496         unsigned int logical_block_size = block->bp_block;
6497         struct request_queue *q = block->request_queue;
6498         struct dasd_device *device = block->base;
6499         int max;
6500
6501         if (device->features & DASD_FEATURE_USERAW) {
6502                 /*
6503                  * The max_blocks value for raw_track access is 256;
6504                  * it is higher than the native ECKD value because we
6505                  * only need one ccw per track,
6506                  * so the max_hw_sectors work out to
6507                  * 2048 x 512B = 1024kB = 16 tracks.
6508                  */
6509                 max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
6510         } else {
6511                 max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
6512         }
6513         blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
6514         q->limits.max_dev_sectors = max;
6515         blk_queue_logical_block_size(q, logical_block_size);
6516         blk_queue_max_hw_sectors(q, max);
6517         blk_queue_max_segments(q, USHRT_MAX);
6518         /* With page sized segments each segment can be translated into one idaw/tidaw */
6519         blk_queue_max_segment_size(q, PAGE_SIZE);
6520         blk_queue_segment_boundary(q, PAGE_SIZE - 1);
6521 }
6522
6523 static struct ccw_driver dasd_eckd_driver = {
6524         .driver = {
6525                 .name   = "dasd-eckd",
6526                 .owner  = THIS_MODULE,
6527         },
6528         .ids         = dasd_eckd_ids,
6529         .probe       = dasd_eckd_probe,
6530         .remove      = dasd_generic_remove,
6531         .set_offline = dasd_generic_set_offline,
6532         .set_online  = dasd_eckd_set_online,
6533         .notify      = dasd_generic_notify,
6534         .path_event  = dasd_generic_path_event,
6535         .shutdown    = dasd_generic_shutdown,
6536         .freeze      = dasd_generic_pm_freeze,
6537         .thaw        = dasd_generic_restore_device,
6538         .restore     = dasd_generic_restore_device,
6539         .uc_handler  = dasd_generic_uc_handler,
6540         .int_class   = IRQIO_DAS,
6541 };
6542
6543 static struct dasd_discipline dasd_eckd_discipline = {
6544         .owner = THIS_MODULE,
6545         .name = "ECKD",
6546         .ebcname = "ECKD",
6547         .check_device = dasd_eckd_check_characteristics,
6548         .uncheck_device = dasd_eckd_uncheck_device,
6549         .do_analysis = dasd_eckd_do_analysis,
6550         .verify_path = dasd_eckd_verify_path,
6551         .basic_to_ready = dasd_eckd_basic_to_ready,
6552         .online_to_ready = dasd_eckd_online_to_ready,
6553         .basic_to_known = dasd_eckd_basic_to_known,
6554         .setup_blk_queue = dasd_eckd_setup_blk_queue,
6555         .fill_geometry = dasd_eckd_fill_geometry,
6556         .start_IO = dasd_start_IO,
6557         .term_IO = dasd_term_IO,
6558         .handle_terminated_request = dasd_eckd_handle_terminated_request,
6559         .format_device = dasd_eckd_format_device,
6560         .check_device_format = dasd_eckd_check_device_format,
6561         .erp_action = dasd_eckd_erp_action,
6562         .erp_postaction = dasd_eckd_erp_postaction,
6563         .check_for_device_change = dasd_eckd_check_for_device_change,
6564         .build_cp = dasd_eckd_build_alias_cp,
6565         .free_cp = dasd_eckd_free_alias_cp,
6566         .dump_sense = dasd_eckd_dump_sense,
6567         .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
6568         .fill_info = dasd_eckd_fill_info,
6569         .ioctl = dasd_eckd_ioctl,
6570         .freeze = dasd_eckd_pm_freeze,
6571         .restore = dasd_eckd_restore_device,
6572         .reload = dasd_eckd_reload_device,
6573         .get_uid = dasd_eckd_get_uid,
6574         .kick_validate = dasd_eckd_kick_validate_server,
6575         .check_attention = dasd_eckd_check_attention,
6576         .host_access_count = dasd_eckd_host_access_count,
6577         .hosts_print = dasd_hosts_print,
6578         .handle_hpf_error = dasd_eckd_handle_hpf_error,
6579         .disable_hpf = dasd_eckd_disable_hpf_device,
6580         .hpf_enabled = dasd_eckd_hpf_enabled,
6581         .reset_path = dasd_eckd_reset_path,
6582         .is_ese = dasd_eckd_is_ese,
6583         .space_allocated = dasd_eckd_space_allocated,
6584         .space_configured = dasd_eckd_space_configured,
6585         .logical_capacity = dasd_eckd_logical_capacity,
6586         .release_space = dasd_eckd_release_space,
6587         .ext_pool_id = dasd_eckd_ext_pool_id,
6588         .ext_size = dasd_eckd_ext_size,
6589         .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
6590         .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
6591         .ext_pool_oos = dasd_eckd_ext_pool_oos,
6592         .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
6593         .ese_format = dasd_eckd_ese_format,
6594         .ese_read = dasd_eckd_ese_read,
6595 };
6596
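/*
 * Allocate the static emergency request buffers and the raw-track
 * padding page, then register the ECKD ccw driver.
 */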
6597 static int __init
6598 dasd_eckd_init(void)
6599 {
6600         int ret;
6601
6602         ASCEBC(dasd_eckd_discipline.ebcname, 4);
6603         dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6604                                    GFP_KERNEL | GFP_DMA);
6605         if (!dasd_reserve_req)
6606                 return -ENOMEM;
6607         dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6608                                     GFP_KERNEL | GFP_DMA);
6609         if (!dasd_vol_info_req) {
6610                 kfree(dasd_reserve_req);
                     return -ENOMEM;
             }
6611         path_verification_worker = kmalloc(sizeof(*path_verification_worker),
6612                                    GFP_KERNEL | GFP_DMA);
6613         if (!path_verification_worker) {
6614                 kfree(dasd_reserve_req);
6615                 kfree(dasd_vol_info_req);
6616                 return -ENOMEM;
6617         }
6618         rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6619         if (!rawpadpage) {
6620                 kfree(path_verification_worker);
6621                 kfree(dasd_reserve_req);
6622                 kfree(dasd_vol_info_req);
6623                 return -ENOMEM;
6624         }
6625         ret = ccw_driver_register(&dasd_eckd_driver);
6626         if (!ret)
6627                 wait_for_device_probe();
6628         else {
6629                 kfree(path_verification_worker);
6630                 kfree(dasd_reserve_req);
6631                 kfree(dasd_vol_info_req);
6632                 free_page((unsigned long)rawpadpage);
6633         }
6634         return ret;
6635 }
6636
6637 static void __exit
6638 dasd_eckd_cleanup(void)
6639 {
6640         ccw_driver_unregister(&dasd_eckd_driver);
6641         kfree(path_verification_worker);
6642         kfree(dasd_reserve_req);
             kfree(dasd_vol_info_req);
6643         free_page((unsigned long)rawpadpage);
6644 }
6645
6646 module_init(dasd_eckd_init);
6647 module_exit(dasd_eckd_cleanup);