// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */
#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
/*
 * raw track access always maps to 64k in memory
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128
MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;
/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;
#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2
/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);
struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};
/* definitions for the path verification worker */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
	__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);
struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};
static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);
/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev);
	return ret;
}
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
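/*
 * Pack a cylinder/head address into a ch_t. For cylinder numbers
 * beyond 16 bits the excess bits (cyl >> 16) are kept in the upper
 * bits of the head field, the real head number in the low 4 bits.
 */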
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
/*
 * calculate failing track from sense data depending if
 * it is an EAV device or not
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	head = sense[31] & 0x0F;
	*track = cyl * private->rdc_data.trk_per_cyl + head;
	return 0;
}
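/*
 * Set the System Time Stamp in the define extent data, which enables
 * XRC (eXtended Remote Copy) time stamping for write requests.
 */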
static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}
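/*
 * Build a Define Extent CCW and its payload for the track range
 * trk..totrk, setting permission and cache attributes according to
 * the command that will follow.
 */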
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
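/*
 * Build the payload of a Locate Record (Extended) CCW: sector,
 * count, operation code and seek/search addresses for the given
 * command.
 */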
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector = 0;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}
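/*
 * Convenience wrapper around prefix_LRE: build a PFX CCW that
 * carries define extent data only (format 0, no locate record part).
 */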
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
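/*
 * Build the payload of a classic Locate Record CCW; the sector
 * computation mirrors locate_record_ext above.
 */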
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		  trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}
/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}
/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}
static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}
/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}
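/*
 * Set up a Read Configuration Data request for the given path mask,
 * ready to be started with path verification allowed.
 */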
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
/*
 * Wakeup helper for read_conf
 * if the cqr is not done and needs some error recovery
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
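/*
 * Read configuration data on one path; on success the allocated
 * buffer and its size are returned through rcd_buffer and
 * rcd_buffer_size.
 */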
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data ara */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{
	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}

	return 0;
};
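/*
 * Scan the configuration data for the general NEQ and return its
 * path access mode bits (0x02 and 0x03 are sorted into the
 * non-preferred and preferred path masks by the callers).
 */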
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}
static void dasd_eckd_store_conf_data(struct dasd_device *device,
				      struct dasd_conf_data *conf_data, int chp)
{
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;

	ccw_device_get_schid(device->cdev, &sch_id);
	/*
	 * path handling and read_conf allocate data
	 * free it before replacing the pointer
	 */
	kfree(device->path[chp].conf_data);
	device->path[chp].conf_data = conf_data;
	device->path[chp].cssid = sch_id.cssid;
	device->path[chp].ssid = sch_id.ssid;
	chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
	if (chp_desc)
		device->path[chp].chpid = chp_desc->chpid;
	kfree(chp_desc);
}
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
		dasd_path_notoper(device, i);
	}
}
static void dasd_eckd_read_fc_security(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	u8 esm_valid;
	u8 esm[8];
	int chp;
	int rc;

	rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
	if (rc) {
		for (chp = 0; chp < 8; chp++)
			device->path[chp].fc_security = 0;
		return;
	}

	for (chp = 0; chp < 8; chp++) {
		if (esm_valid & (0x80 >> chp))
			device->path[chp].fc_security = esm[chp];
		else
			device->path[chp].fc_security = 0;
	}
}
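/*
 * Read the configuration data on all operational paths, build the
 * device UID from the first valid set, and sort every path into the
 * operational/preferred/cable-problem path masks depending on
 * whether its UID matches the device UID.
 */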
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			/*
			 * build device UID that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}

		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	dasd_eckd_read_fc_security(device);

	return path_err;
}
static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	unsigned int mdc;
	int tpm;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc == 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	} else {
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
	}
}
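/*
 * Check that a new path supports at least the currently active
 * maximum data size for zHPF requests.
 */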
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
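/*
 * Re-read the configuration data from the first working path and
 * regenerate the device UID from it.
 */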
static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
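/*
 * Verify each path that became available: read its configuration
 * data, compare the path UID against the device UID and add the path
 * to the matching path masks (operational, non-preferred, preferred,
 * to-be-verified, cable problem, no-HPF).
 */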
static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	struct dasd_conf_data *conf_data;
	unsigned long flags;
	char print_uid[60];
	int rc, pos;

	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
		if (conf_data) {
			memcpy(conf_data, data->rcd_buffer,
			       DASD_ECKD_RCD_DATA_SIZE);
		}
		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

		dasd_path_create_kobj(device, pos);
	}
}
static void do_pe_handler_work(struct work_struct *work)
{
	struct pe_handler_work_data *data;
	struct dasd_device *device;

	data = container_of(work, struct pe_handler_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}

	if (data->tbvpm)
		dasd_eckd_path_available_action(device, data);
	if (data->fcsecpm)
		dasd_eckd_read_fc_security(device);

	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_pe_handler_mutex);
	else
		kfree(data);
}
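/*
 * Schedule the path event handler worker; falls back to the static
 * global worker (serialized by dasd_pe_handler_mutex) if the atomic
 * allocation fails.
 */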
static int dasd_eckd_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	struct pe_handler_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_pe_handler_mutex)) {
			data = pe_handler_worker;
			data->isglobal = 1;
		} else {
			return -ENOMEM;
		}
	} else {
		memset(data, 0, sizeof(*data));
	}
	INIT_WORK(&data->worker, do_pe_handler_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = tbvpm;
	data->fcsecpm = fcsecpm;
	schedule_work(&data->worker);
	return 0;
}
static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)vsq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
static int dasd_eckd_is_ese(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.vol_info.ese;
}

static int dasd_eckd_ext_pool_id(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.extent_pool_id;
}
/*
 * This value represents the total amount of available space. As more space is
 * allocated by ESE volumes, this value will decrease.
 * The data for this value is therefore updated on any call.
 */
static int dasd_eckd_space_configured(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_configured;
}
/*
 * The value of space allocated by an ESE volume may have changed and is
 * therefore updated on any call.
 */
static int dasd_eckd_space_allocated(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_allocated;
}
static int dasd_eckd_logical_capacity(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.logical_capacity;
}
static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
	struct ext_pool_exhaust_work_data *data;
	struct dasd_device *device;
	struct dasd_device *base;

	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
	device = data->device;
	base = data->base;

	if (!base)
		base = device;
	if (dasd_eckd_space_configured(base) != 0) {
		dasd_generic_space_avail(device);
	} else {
		dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
		DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
	}

	dasd_put_device(device);
	kfree(data);
}
static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
				      struct dasd_ccw_req *cqr)
{
	struct ext_pool_exhaust_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
	dasd_get_device(device);
	data->device = device;

	if (cqr->block)
		data->base = cqr->block->base;
	else if (cqr->basedev)
		data->base = cqr->basedev;
	else
		data->base = NULL;

	schedule_work(&data->worker);

	return 0;
}
static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
					struct dasd_rssd_lcq *lcq)
{
	struct dasd_eckd_private *private = device->private;
	int pool_id = dasd_eckd_ext_pool_id(device);
	struct dasd_ext_pool_sum eps;
	int i;

	for (i = 0; i < lcq->pool_count; i++) {
		eps = lcq->ext_pool_sum[i];
		if (eps.pool_id == pool_id) {
			memcpy(&private->eps, &eps,
			       sizeof(struct dasd_ext_pool_sum));
		}
	}
}
/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)lcq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
/*
 * Depending on the device type, the extent size is specified either as
 * cylinders per extent (CKD) or size per extent (FBA)
 * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl.
 */
static int dasd_eckd_ext_size(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_ext_pool_sum eps = private->eps;

	if (!eps.flags.extent_size_valid)
		return 0;
	if (eps.extent_size.size_1G)
		return 1113;
	if (eps.extent_size.size_16M)
		return 21;

	return 0;
}
static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.warn_thrshld;
}

static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.capacity_at_warnlevel;
}
/*
 * Extent Pool out of space
 */
static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.pool_oos;
}
/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags e.g. turn on failfast, to prevent blocking
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device,
				     unsigned long flags)
{
	struct dasd_eckd_private *private = device->private;
	int enable_pav, rc;

	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;
	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);

	/* may be requested feature is not available on server,
	 * therefore just report error and go ahead */
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
	return rc;
}
/*
 * worker to do a validate server in case of a lost pathgroup
 */
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_validate);
	unsigned long flags = 0;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
	if (dasd_eckd_validate_server(device, flags)
	    == -EAGAIN) {
		/* schedule worker again if failed */
		schedule_work(&device->kick_validate);
		return;
	}

	dasd_put_device(device);
}
static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
	dasd_get_device(device);
	/* exit if device not online or in offline processing */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state < DASD_STATE_ONLINE) {
		dasd_put_device(device);
		return;
	}
	/* queue call to do_validate_server to the kernel event daemon. */
	if (!schedule_work(&device->kick_validate))
		dasd_put_device(device);
}
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server*/
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
	/* setup work queue for summary unit check */
	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	device->default_retries = DASD_RETRIES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;

	if (private->gneq) {
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	dasd_path_create_kobjects(device);

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Volume Information */
	dasd_eckd_read_vol_info(device);

	/* Read Extent Pool Information */
	dasd_eckd_read_ext_pool_info(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}

	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	dasd_eckd_clear_conf_data(device);
	dasd_path_remove_kobjects(device);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	if (!private)
		return;

	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	dasd_eckd_clear_conf_data(device);
	dasd_path_remove_kobjects(device);
}
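/*
 * Build the initial analysis request: read the count fields of the
 * first four records on track 0 and the first record on track 1 to
 * detect the disk layout.
 */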
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 2 tracks. */
	define_extent(ccw++, cqr->data, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 1. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 1, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
/* differentiate between 'no record found' and any other error */
static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
{
	char *sense;
	if (init_cqr->status == DASD_CQR_DONE)
		return INIT_CQR_OK;
	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
		 init_cqr->status == DASD_CQR_FAILED) {
		sense = dasd_get_sense(&init_cqr->irb);
		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
			return INIT_CQR_UNFORMATTED;
		else
			return INIT_CQR_ERROR;
	} else
		return INIT_CQR_ERROR;
}
/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
 * for deletion in the meantime).
 */
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
					void *data)
{
	struct dasd_device *device = init_cqr->startdev;
	struct dasd_eckd_private *private = device->private;

	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}
2293 static int dasd_eckd_start_analysis(struct dasd_block *block)
2295 struct dasd_ccw_req *init_cqr;
2297 init_cqr = dasd_eckd_analysis_ccw(block->base);
2298 if (IS_ERR(init_cqr))
2299 return PTR_ERR(init_cqr);
2300 init_cqr->callback = dasd_eckd_analysis_callback;
2301 init_cqr->callback_data = NULL;
2302 init_cqr->expires = 5*HZ;
2303 /* first try without ERP, so we can later handle unformatted
* devices as a special case
2306 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2307 init_cqr->retries = 0;
2308 dasd_add_request_head(init_cqr);
2312 static int dasd_eckd_end_analysis(struct dasd_block *block)
2314 struct dasd_device *device = block->base;
2315 struct dasd_eckd_private *private = device->private;
2316 struct eckd_count *count_area;
2317 unsigned int sb, blk_per_trk;
2319 struct dasd_ccw_req *init_cqr;
2321 status = private->init_cqr_status;
2322 private->init_cqr_status = -1;
2323 if (status == INIT_CQR_ERROR) {
2324 /* try again, this time with full ERP */
2325 init_cqr = dasd_eckd_analysis_ccw(device);
2326 dasd_sleep_on(init_cqr);
2327 status = dasd_eckd_analysis_evaluation(init_cqr);
2328 dasd_sfree_request(init_cqr, device);
2331 if (device->features & DASD_FEATURE_USERAW) {
2332 block->bp_block = DASD_RAW_BLOCKSIZE;
2333 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2334 block->s2b_shift = 3;
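/*
 * Worked example (illustrative): s2b_shift counts how often 512 must
 * be doubled to reach the block size; for the 4k raw blocks above,
 * 512 << 3 == 4096, hence the shift of 3.
 */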
2338 if (status == INIT_CQR_UNFORMATTED) {
2339 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2340 return -EMEDIUMTYPE;
2341 } else if (status == INIT_CQR_ERROR) {
2342 dev_err(&device->cdev->dev,
2343 "Detecting the DASD disk layout failed because "
2344 "of an I/O error\n");
2348 private->uses_cdl = 1;
2349 /* Check Track 0 for Compatible Disk Layout */
2351 for (i = 0; i < 3; i++) {
2352 if (private->count_area[i].kl != 4 ||
2353 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2354 private->count_area[i].cyl != 0 ||
2355 private->count_area[i].head != count_area_head[i] ||
2356 private->count_area[i].record != count_area_rec[i]) {
2357 private->uses_cdl = 0;
2362 count_area = &private->count_area[3];
2364 if (private->uses_cdl == 0) {
2365 for (i = 0; i < 5; i++) {
2366 if ((private->count_area[i].kl != 0) ||
2367 (private->count_area[i].dl !=
2368 private->count_area[0].dl) ||
2369 private->count_area[i].cyl != 0 ||
2370 private->count_area[i].head != count_area_head[i] ||
2371 private->count_area[i].record != count_area_rec[i])
2375 count_area = &private->count_area[0];
2377 if (private->count_area[3].record == 1)
2378 dev_warn(&device->cdev->dev,
2379 "Track 0 has no records following the VTOC\n");
2382 if (count_area != NULL && count_area->kl == 0) {
/* we found nothing violating our disk layout */
2384 if (dasd_check_blocksize(count_area->dl) == 0)
2385 block->bp_block = count_area->dl;
2387 if (block->bp_block == 0) {
2388 dev_warn(&device->cdev->dev,
2389 "The disk layout of the DASD is not supported\n");
2390 return -EMEDIUMTYPE;
2392 block->s2b_shift = 0; /* bits to shift 512 to get a block */
2393 for (sb = 512; sb < block->bp_block; sb = sb << 1)
2396 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2399 block->blocks = ((unsigned long) private->real_cyl *
2400 private->rdc_data.trk_per_cyl *
2403 dev_info(&device->cdev->dev,
2404 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2405 "%s\n", (block->bp_block >> 10),
2406 (((unsigned long) private->real_cyl *
2407 private->rdc_data.trk_per_cyl *
2408 blk_per_trk * (block->bp_block >> 9)) >> 1),
2409 ((blk_per_trk * block->bp_block) >> 10),
2411 "compatible disk layout" : "linux disk layout");
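/*
 * Illustrative numbers (not from the original source): a 3390-3 with
 * 3339 cylinders, 15 tracks per cylinder and 4096-byte blocks holds
 * 12 blocks per track, so the message above would report 4 KB/block,
 * 48 KB/track and 3339 * 15 * 12 * 4 KB = 2404080 KB total size.
 */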
2416 static int dasd_eckd_do_analysis(struct dasd_block *block)
2418 struct dasd_eckd_private *private = block->base->private;
2420 if (private->init_cqr_status < 0)
2421 return dasd_eckd_start_analysis(block);
2423 return dasd_eckd_end_analysis(block);
2426 static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2428 return dasd_alias_add_device(device);
2431 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2433 if (cancel_work_sync(&device->reload_device))
2434 dasd_put_device(device);
2435 if (cancel_work_sync(&device->kick_validate))
2436 dasd_put_device(device);
2441 static int dasd_eckd_basic_to_known(struct dasd_device *device)
2443 return dasd_alias_remove_device(device);
2447 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2449 struct dasd_eckd_private *private = block->base->private;
2451 if (dasd_check_blocksize(block->bp_block) == 0) {
2452 geo->sectors = recs_per_track(&private->rdc_data,
2453 0, block->bp_block);
2455 geo->cylinders = private->rdc_data.no_cyl;
2456 geo->heads = private->rdc_data.trk_per_cyl;
2461 * Build the TCW request for the format check
2463 static struct dasd_ccw_req *
2464 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2465 int enable_pav, struct eckd_count *fmt_buffer,
2468 struct dasd_eckd_private *start_priv;
2469 struct dasd_device *startdev = NULL;
2470 struct tidaw *last_tidaw = NULL;
2471 struct dasd_ccw_req *cqr;
2479 startdev = dasd_alias_get_start_dev(base);
2484 start_priv = startdev->private;
2486 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
* we're adding 'count' tidaws to the itcw, so
2490 * calculate the corresponding itcw_size
2492 itcw_size = itcw_calc_size(0, count, 0);
2494 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2498 start_priv->count++;
2500 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2506 cqr->cpaddr = itcw_get_tcw(itcw);
2507 rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2508 DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2509 sizeof(struct eckd_count),
2510 count * sizeof(struct eckd_count), 0, rpt);
2514 for (i = 0; i < count; i++) {
2515 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2516 sizeof(struct eckd_count));
2517 if (IS_ERR(last_tidaw)) {
2523 last_tidaw->flags |= TIDAW_FLAGS_LAST;
2524 itcw_finalize(itcw);
2527 cqr->startdev = startdev;
2528 cqr->memdev = startdev;
2529 cqr->basedev = base;
2530 cqr->retries = startdev->default_retries;
2531 cqr->expires = startdev->default_expires * HZ;
2532 cqr->buildclk = get_tod_clock();
2533 cqr->status = DASD_CQR_FILLED;
2534 /* Set flags to suppress output for expected errors */
2535 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2536 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2541 dasd_sfree_request(cqr, startdev);
2547 * Build the CCW request for the format check
2549 static struct dasd_ccw_req *
2550 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2551 int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2553 struct dasd_eckd_private *start_priv;
2554 struct dasd_eckd_private *base_priv;
2555 struct dasd_device *startdev = NULL;
2556 struct dasd_ccw_req *cqr;
2559 int cplength, datasize;
2565 startdev = dasd_alias_get_start_dev(base);
2570 start_priv = startdev->private;
2571 base_priv = base->private;
2573 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2575 use_prefix = base_priv->features.feature[8] & 0x01;
2579 datasize = sizeof(struct PFX_eckd_data);
2582 datasize = sizeof(struct DE_eckd_data) +
2583 sizeof(struct LO_eckd_data);
2587 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2591 start_priv->count++;
2596 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2597 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2600 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2601 DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2603 data += sizeof(struct DE_eckd_data);
2604 ccw[-1].flags |= CCW_FLAG_CC;
2606 locate_record(ccw++, data, fdata->start_unit, 0, count,
2607 DASD_ECKD_CCW_READ_COUNT, base, 0);
2610 for (i = 0; i < count; i++) {
2611 ccw[-1].flags |= CCW_FLAG_CC;
2612 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2613 ccw->flags = CCW_FLAG_SLI;
2615 ccw->cda = (__u32)(addr_t) fmt_buffer;
2620 cqr->startdev = startdev;
2621 cqr->memdev = startdev;
2622 cqr->basedev = base;
2623 cqr->retries = DASD_RETRIES;
2624 cqr->expires = startdev->default_expires * HZ;
2625 cqr->buildclk = get_tod_clock();
2626 cqr->status = DASD_CQR_FILLED;
2627 /* Set flags to suppress output for expected errors */
2628 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2633 static struct dasd_ccw_req *
2634 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2635 struct format_data_t *fdata, int enable_pav)
2637 struct dasd_eckd_private *base_priv;
2638 struct dasd_eckd_private *start_priv;
2639 struct dasd_ccw_req *fcp;
2640 struct eckd_count *ect;
2641 struct ch_t address;
2645 int cplength, datasize;
2653 startdev = dasd_alias_get_start_dev(base);
2658 start_priv = startdev->private;
2659 base_priv = base->private;
2661 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2663 nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2666 * fdata->intensity is a bit string that tells us what to do:
2667 * Bit 0: write record zero
2668 * Bit 1: write home address, currently not supported
2669 * Bit 2: invalidate tracks
2670 * Bit 3: use OS/390 compatible disk layout (cdl)
2671 * Bit 4: do not allow storage subsystem to modify record zero
* Only some bit combinations make sense.
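* For example (illustrative): 0x09 writes record zero and formats the
* tracks using cdl, 0x04 invalidates tracks, and 0x0c invalidates
* tracks using cdl; 0x10 is masked off below and only controls whether
* the storage server may modify record zero.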
2674 if (fdata->intensity & 0x10) {
2676 intensity = fdata->intensity & ~0x10;
2679 intensity = fdata->intensity;
2682 use_prefix = base_priv->features.feature[8] & 0x01;
2684 switch (intensity) {
2685 case 0x00: /* Normal format */
2686 case 0x08: /* Normal format, use cdl. */
2687 cplength = 2 + (rpt*nr_tracks);
2689 datasize = sizeof(struct PFX_eckd_data) +
2690 sizeof(struct LO_eckd_data) +
2691 rpt * nr_tracks * sizeof(struct eckd_count);
2693 datasize = sizeof(struct DE_eckd_data) +
2694 sizeof(struct LO_eckd_data) +
2695 rpt * nr_tracks * sizeof(struct eckd_count);
2697 case 0x01: /* Write record zero and format track. */
2698 case 0x09: /* Write record zero and format track, use cdl. */
2699 cplength = 2 + rpt * nr_tracks;
2701 datasize = sizeof(struct PFX_eckd_data) +
2702 sizeof(struct LO_eckd_data) +
2703 sizeof(struct eckd_count) +
2704 rpt * nr_tracks * sizeof(struct eckd_count);
2706 datasize = sizeof(struct DE_eckd_data) +
2707 sizeof(struct LO_eckd_data) +
2708 sizeof(struct eckd_count) +
2709 rpt * nr_tracks * sizeof(struct eckd_count);
2711 case 0x04: /* Invalidate track. */
2712 case 0x0c: /* Invalidate track, use cdl. */
2715 datasize = sizeof(struct PFX_eckd_data) +
2716 sizeof(struct LO_eckd_data) +
2717 sizeof(struct eckd_count);
2719 datasize = sizeof(struct DE_eckd_data) +
2720 sizeof(struct LO_eckd_data) +
2721 sizeof(struct eckd_count);
2724 dev_warn(&startdev->cdev->dev,
2725 "An I/O control call used incorrect flags 0x%x\n",
2727 return ERR_PTR(-EINVAL);
2730 fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2734 start_priv->count++;
2738 switch (intensity & ~0x08) {
2739 case 0x00: /* Normal format. */
2741 prefix(ccw++, (struct PFX_eckd_data *) data,
2742 fdata->start_unit, fdata->stop_unit,
2743 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2744 /* grant subsystem permission to format R0 */
2746 ((struct PFX_eckd_data *)data)
2747 ->define_extent.ga_extended |= 0x04;
2748 data += sizeof(struct PFX_eckd_data);
2750 define_extent(ccw++, (struct DE_eckd_data *) data,
2751 fdata->start_unit, fdata->stop_unit,
2752 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2753 /* grant subsystem permission to format R0 */
2755 ((struct DE_eckd_data *) data)
2756 ->ga_extended |= 0x04;
2757 data += sizeof(struct DE_eckd_data);
2759 ccw[-1].flags |= CCW_FLAG_CC;
2760 locate_record(ccw++, (struct LO_eckd_data *) data,
2761 fdata->start_unit, 0, rpt*nr_tracks,
2762 DASD_ECKD_CCW_WRITE_CKD, base,
2764 data += sizeof(struct LO_eckd_data);
2766 case 0x01: /* Write record zero + format track. */
2768 prefix(ccw++, (struct PFX_eckd_data *) data,
2769 fdata->start_unit, fdata->stop_unit,
2770 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2772 data += sizeof(struct PFX_eckd_data);
2774 define_extent(ccw++, (struct DE_eckd_data *) data,
2775 fdata->start_unit, fdata->stop_unit,
2776 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2777 data += sizeof(struct DE_eckd_data);
2779 ccw[-1].flags |= CCW_FLAG_CC;
2780 locate_record(ccw++, (struct LO_eckd_data *) data,
2781 fdata->start_unit, 0, rpt * nr_tracks + 1,
2782 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2783 base->block->bp_block);
2784 data += sizeof(struct LO_eckd_data);
2786 case 0x04: /* Invalidate track. */
2788 prefix(ccw++, (struct PFX_eckd_data *) data,
2789 fdata->start_unit, fdata->stop_unit,
2790 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2791 data += sizeof(struct PFX_eckd_data);
2793 define_extent(ccw++, (struct DE_eckd_data *) data,
2794 fdata->start_unit, fdata->stop_unit,
2795 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2796 data += sizeof(struct DE_eckd_data);
2798 ccw[-1].flags |= CCW_FLAG_CC;
2799 locate_record(ccw++, (struct LO_eckd_data *) data,
2800 fdata->start_unit, 0, 1,
2801 DASD_ECKD_CCW_WRITE_CKD, base, 8);
2802 data += sizeof(struct LO_eckd_data);
2806 for (j = 0; j < nr_tracks; j++) {
2807 /* calculate cylinder and head for the current track */
2809 (fdata->start_unit + j) /
2810 base_priv->rdc_data.trk_per_cyl,
2811 (fdata->start_unit + j) %
2812 base_priv->rdc_data.trk_per_cyl);
2813 if (intensity & 0x01) { /* write record zero */
2814 ect = (struct eckd_count *) data;
2815 data += sizeof(struct eckd_count);
2816 ect->cyl = address.cyl;
2817 ect->head = address.head;
2821 ccw[-1].flags |= CCW_FLAG_CC;
2822 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2823 ccw->flags = CCW_FLAG_SLI;
2825 ccw->cda = (__u32)(addr_t) ect;
2828 if ((intensity & ~0x08) & 0x04) { /* erase track */
2829 ect = (struct eckd_count *) data;
2830 data += sizeof(struct eckd_count);
2831 ect->cyl = address.cyl;
2832 ect->head = address.head;
2836 ccw[-1].flags |= CCW_FLAG_CC;
2837 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2838 ccw->flags = CCW_FLAG_SLI;
2840 ccw->cda = (__u32)(addr_t) ect;
2841 } else { /* write remaining records */
2842 for (i = 0; i < rpt; i++) {
2843 ect = (struct eckd_count *) data;
2844 data += sizeof(struct eckd_count);
2845 ect->cyl = address.cyl;
2846 ect->head = address.head;
2847 ect->record = i + 1;
2849 ect->dl = fdata->blksize;
2851 * Check for special tracks 0-1
2852 * when formatting CDL
2854 if ((intensity & 0x08) &&
2855 address.cyl == 0 && address.head == 0) {
2858 ect->dl = sizes_trk0[i] - 4;
2861 if ((intensity & 0x08) &&
2862 address.cyl == 0 && address.head == 1) {
2864 ect->dl = LABEL_SIZE - 44;
2866 ccw[-1].flags |= CCW_FLAG_CC;
2867 if (i != 0 || j == 0)
2869 DASD_ECKD_CCW_WRITE_CKD;
2872 DASD_ECKD_CCW_WRITE_CKD_MT;
2873 ccw->flags = CCW_FLAG_SLI;
2875 ccw->cda = (__u32)(addr_t) ect;
2881 fcp->startdev = startdev;
2882 fcp->memdev = startdev;
2883 fcp->basedev = base;
2885 fcp->expires = startdev->default_expires * HZ;
2886 fcp->buildclk = get_tod_clock();
2887 fcp->status = DASD_CQR_FILLED;
2893 * Wrapper function to build a CCW request depending on input data
2895 static struct dasd_ccw_req *
2896 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2897 struct format_data_t *fdata, int enable_pav,
2898 int tpm, struct eckd_count *fmt_buffer, int rpt)
2900 struct dasd_ccw_req *ccw_req;
2903 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2906 ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2910 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2918 * Sanity checks on format_data
2920 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2921 struct format_data_t *fdata)
2923 struct dasd_eckd_private *private = base->private;
2925 if (fdata->start_unit >=
2926 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2927 dev_warn(&base->cdev->dev,
2928 "Start track number %u used in formatting is too big\n",
2932 if (fdata->stop_unit >=
2933 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2934 dev_warn(&base->cdev->dev,
2935 "Stop track number %u used in formatting is too big\n",
2939 if (fdata->start_unit > fdata->stop_unit) {
2940 dev_warn(&base->cdev->dev,
2941 "Start track %u used in formatting exceeds end track\n",
2945 if (dasd_check_blocksize(fdata->blksize) != 0) {
2946 dev_warn(&base->cdev->dev,
2947 "The DASD cannot be formatted with block size %u\n",
2955 * This function will process format_data originally coming from an IOCTL
2957 static int dasd_eckd_format_process_data(struct dasd_device *base,
2958 struct format_data_t *fdata,
2959 int enable_pav, int tpm,
2960 struct eckd_count *fmt_buffer, int rpt,
2963 struct dasd_eckd_private *private = base->private;
2964 struct dasd_ccw_req *cqr, *n;
2965 struct list_head format_queue;
2966 struct dasd_device *device;
2968 int old_start, old_stop, format_step;
2972 rc = dasd_eckd_format_sanity_checks(base, fdata);
2976 INIT_LIST_HEAD(&format_queue);
2978 old_start = fdata->start_unit;
2979 old_stop = fdata->stop_unit;
2981 if (!tpm && fmt_buffer != NULL) {
2982 /* Command Mode / Format Check */
2984 } else if (tpm && fmt_buffer != NULL) {
2985 /* Transport Mode / Format Check */
2986 format_step = DASD_CQR_MAX_CCW / rpt;
2988 /* Normal Formatting */
2989 format_step = DASD_CQR_MAX_CCW /
2990 recs_per_track(&private->rdc_data, 0, fdata->blksize);
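/*
 * Worked example (illustrative, assuming DASD_CQR_MAX_CCW is 255):
 * with 4096-byte blocks a 3390 track holds 12 records, so a single
 * format request covers at most 255 / 12 = 21 tracks.
 */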
2995 while (fdata->start_unit <= old_stop) {
2996 step = fdata->stop_unit - fdata->start_unit + 1;
2997 if (step > format_step) {
2999 fdata->start_unit + format_step - 1;
3002 cqr = dasd_eckd_format_build_ccw_req(base, fdata,
3007 if (rc == -ENOMEM) {
3008 if (list_empty(&format_queue))
3011 * not enough memory available, start
3012 * requests retry after first requests
3020 list_add_tail(&cqr->blocklist, &format_queue);
3023 step = fdata->stop_unit - fdata->start_unit + 1;
3024 fmt_buffer += rpt * step;
3026 fdata->start_unit = fdata->stop_unit + 1;
3027 fdata->stop_unit = old_stop;
3030 rc = dasd_sleep_on_queue(&format_queue);
3033 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
3034 device = cqr->startdev;
3035 private = device->private;
3037 if (cqr->status == DASD_CQR_FAILED) {
3039 * Only get sense data if called by format
3042 if (fmt_buffer && irb) {
3043 sense = dasd_get_sense(&cqr->irb);
3044 memcpy(irb, &cqr->irb, sizeof(*irb));
3048 list_del_init(&cqr->blocklist);
3049 dasd_ffree_request(cqr, device);
3053 if (rc && rc != -EIO)
3057 * In case fewer than the expected records are on the
3058 * track, we will most likely get a 'No Record Found'
3059 * error (in command mode) or a 'File Protected' error
3060 * (in transport mode). Those particular cases shouldn't
3061 * pass the -EIO to the IOCTL, therefore reset the rc
3065 (sense[1] & SNS1_NO_REC_FOUND ||
3066 sense[1] & SNS1_FILE_PROTECTED))
3075 fdata->start_unit = old_start;
3076 fdata->stop_unit = old_stop;
3081 static int dasd_eckd_format_device(struct dasd_device *base,
3082 struct format_data_t *fdata, int enable_pav)
3084 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
3088 static bool test_and_set_format_track(struct dasd_format_entry *to_format,
3089 struct dasd_block *block)
3091 struct dasd_format_entry *format;
3092 unsigned long flags;
3095 spin_lock_irqsave(&block->format_lock, flags);
3096 list_for_each_entry(format, &block->format_list, list) {
3097 if (format->track == to_format->track) {
3102 list_add_tail(&to_format->list, &block->format_list);
3105 spin_unlock_irqrestore(&block->format_lock, flags);
3109 static void clear_format_track(struct dasd_format_entry *format,
3110 struct dasd_block *block)
3112 unsigned long flags;
3114 spin_lock_irqsave(&block->format_lock, flags);
3115 list_del_init(&format->list);
3116 spin_unlock_irqrestore(&block->format_lock, flags);
3120 * Callback function to free ESE format requests.
3122 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3124 struct dasd_device *device = cqr->startdev;
3125 struct dasd_eckd_private *private = device->private;
3126 struct dasd_format_entry *format = data;
3128 clear_format_track(format, cqr->basedev->block);
3130 dasd_ffree_request(cqr, device);
3133 static struct dasd_ccw_req *
3134 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3137 struct dasd_eckd_private *private;
3138 struct dasd_format_entry *format;
3139 struct format_data_t fdata;
3140 unsigned int recs_per_trk;
3141 struct dasd_ccw_req *fcqr;
3142 struct dasd_device *base;
3143 struct dasd_block *block;
3144 unsigned int blksize;
3145 struct request *req;
3151 req = cqr->callback_data;
3154 private = base->private;
3155 blksize = block->bp_block;
3156 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3157 format = &startdev->format_entry;
3159 first_trk = blk_rq_pos(req) >> block->s2b_shift;
3160 sector_div(first_trk, recs_per_trk);
3162 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3163 sector_div(last_trk, recs_per_trk);
3164 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3168 if (curr_trk < first_trk || curr_trk > last_trk) {
3169 DBF_DEV_EVENT(DBF_WARNING, startdev,
3170 "ESE error track %llu not within range %llu - %llu\n",
3171 curr_trk, first_trk, last_trk);
3172 return ERR_PTR(-EINVAL);
3174 format->track = curr_trk;
/* test if the track is already being formatted by another thread */
3176 if (test_and_set_format_track(format, block))
3177 return ERR_PTR(-EEXIST);
3179 fdata.start_unit = curr_trk;
3180 fdata.stop_unit = curr_trk;
3181 fdata.blksize = blksize;
3182 fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3184 rc = dasd_eckd_format_sanity_checks(base, &fdata);
3186 return ERR_PTR(-EINVAL);
3189 * We're building the request with PAV disabled as we're reusing
3190 * the former startdev.
3192 fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3196 fcqr->callback = dasd_eckd_ese_format_cb;
3197 fcqr->callback_data = (void *) format;
3203 * When data is read from an unformatted area of an ESE volume, this function
3204 * returns zeroed data and thereby mimics a read of zero data.
* The first unformatted track is the one that got the NRF error; its address is
3207 * encoded in the sense data.
3209 * All tracks before have returned valid data and should not be touched.
* All tracks after the unformatted track might or might not be formatted. This
* is currently not known, so remember the amount of processed data and return
* the remainder of the request to the block layer in __dasd_cleanup_cqr().
3214 static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3216 struct dasd_eckd_private *private;
3217 sector_t first_trk, last_trk;
3218 sector_t first_blk, last_blk;
3219 unsigned int blksize, off;
3220 unsigned int recs_per_trk;
3221 struct dasd_device *base;
3222 struct req_iterator iter;
3223 struct dasd_block *block;
3224 unsigned int skip_block;
3225 unsigned int blk_count;
3226 struct request *req;
3233 req = (struct request *) cqr->callback_data;
3234 base = cqr->block->base;
3235 blksize = base->block->bp_block;
3237 private = base->private;
3241 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3242 first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
3243 sector_div(first_trk, recs_per_trk);
3244 last_trk = last_blk =
3245 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3246 sector_div(last_trk, recs_per_trk);
3247 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3251 /* sanity check if the current track from sense data is valid */
3252 if (curr_trk < first_trk || curr_trk > last_trk) {
3253 DBF_DEV_EVENT(DBF_WARNING, base,
3254 "ESE error track %llu not within range %llu - %llu\n",
3255 curr_trk, first_trk, last_trk);
* if the NRF error did not hit the first track, we have to skip over valid
3263 if (curr_trk != first_trk)
3264 skip_block = curr_trk * recs_per_trk - first_blk;
3266 /* we have no information beyond the current track */
3267 end_blk = (curr_trk + 1) * recs_per_trk;
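/*
 * Worked example (illustrative): with 12 blocks per track and a request
 * covering blocks 100-299, first_blk is 100. If the NRF error hit
 * track 10, skip_block = 10 * 12 - 100 = 20 valid blocks are skipped,
 * end_blk is 132, blocks 120-131 are zeroed below, and everything past
 * that is handed back to the block layer via cqr->proc_bytes.
 */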
3269 rq_for_each_segment(bv, req, iter) {
3270 dst = page_address(bv.bv_page) + bv.bv_offset;
3271 for (off = 0; off < bv.bv_len; off += blksize) {
3272 if (first_blk + blk_count >= end_blk) {
3273 cqr->proc_bytes = blk_count * blksize;
3276 if (dst && !skip_block) {
3278 memset(dst, 0, blksize);
3289 * Helper function to count consecutive records of a single track.
3291 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3297 head = fmt_buffer[start].head;
3300 * There are 3 conditions where we stop counting:
3301 * - if data reoccurs (same head and record may reoccur), which may
3302 * happen due to the way DASD_ECKD_CCW_READ_COUNT works
3303 * - when the head changes, because we're iterating over several tracks
3304 * then (DASD_ECKD_CCW_READ_COUNT_MT)
3305 * - when we've reached the end of sensible data in the buffer (the
3306 * record will be 0 then)
3308 for (i = start; i < max; i++) {
3310 if ((fmt_buffer[i].head == head &&
3311 fmt_buffer[i].record == 1) ||
3312 fmt_buffer[i].head != head ||
3313 fmt_buffer[i].record == 0)
3322 * Evaluate a given range of tracks. Data like number of records, blocksize,
3323 * record ids, and key length are compared with expected data.
3325 * If a mismatch occurs, the corresponding error bit is set, as well as
3326 * additional information, depending on the error.
3328 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
3329 struct format_check_t *cdata,
3330 int rpt_max, int rpt_exp,
3331 int trk_per_cyl, int tpm)
3342 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3343 max_entries = trkcount * rpt_max;
3345 for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
3346 /* Calculate the correct next starting position in the buffer */
3348 while (fmt_buffer[pos].record == 0 &&
3349 fmt_buffer[pos].dl == 0) {
3350 if (pos++ > max_entries)
3354 if (i != cdata->expect.start_unit)
3355 pos += rpt_max - count;
3358 /* Calculate the expected geo values for the current track */
3359 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
3361 /* Count and check number of records */
3362 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
3364 if (count < rpt_exp) {
3365 cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
3368 if (count > rpt_exp) {
3369 cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
3373 for (j = 0; j < count; j++, pos++) {
3374 blksize = cdata->expect.blksize;
3378 * Set special values when checking CDL formatted
3381 if ((cdata->expect.intensity & 0x08) &&
3382 geo.cyl == 0 && geo.head == 0) {
3384 blksize = sizes_trk0[j] - 4;
3388 if ((cdata->expect.intensity & 0x08) &&
3389 geo.cyl == 0 && geo.head == 1) {
3390 blksize = LABEL_SIZE - 44;
3394 /* Check blocksize */
3395 if (fmt_buffer[pos].dl != blksize) {
3396 cdata->result = DASD_FMT_ERR_BLKSIZE;
3399 /* Check if key length is 0 */
3400 if (fmt_buffer[pos].kl != kl) {
3401 cdata->result = DASD_FMT_ERR_KEY_LENGTH;
3404 /* Check if record_id is correct */
3405 if (fmt_buffer[pos].cyl != geo.cyl ||
3406 fmt_buffer[pos].head != geo.head ||
3407 fmt_buffer[pos].record != (j + 1)) {
3408 cdata->result = DASD_FMT_ERR_RECORD_ID;
3416 * In case of no errors, we need to decrease by one
3417 * to get the correct positions.
3419 if (!cdata->result) {
3425 cdata->num_records = count;
3426 cdata->rec = fmt_buffer[pos].record;
3427 cdata->blksize = fmt_buffer[pos].dl;
3428 cdata->key_length = fmt_buffer[pos].kl;
3432 * Check the format of a range of tracks of a DASD.
3434 static int dasd_eckd_check_device_format(struct dasd_device *base,
3435 struct format_check_t *cdata,
3438 struct dasd_eckd_private *private = base->private;
3439 struct eckd_count *fmt_buffer;
3441 int rpt_max, rpt_exp;
3442 int fmt_buffer_size;
3448 trk_per_cyl = private->rdc_data.trk_per_cyl;
/* Get the maximum and the expected number of records per track */
3451 rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3452 rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3454 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3455 fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3457 fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3462 * A certain FICON feature subset is needed to operate in transport
3463 * mode. Additionally, the support for transport mode is implicitly
3464 * checked by comparing the buffer size with fcx_max_data. As long as
3465 * the buffer size is smaller we can operate in transport mode and
* process multiple tracks. If not, only one track at a time is
* processed using command mode.
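* Illustrative numbers (not from the original source): checking 100
* tracks against 512-byte blocks gives rpt_max = 50 on a 3390, i.e. a
* buffer of 100 * 50 * 8 = 40000 bytes of 8-byte count areas to compare
* against fcx_max_data.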
3469 if ((private->features.feature[40] & 0x04) &&
3470 fmt_buffer_size <= private->fcx_max_data)
3473 rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3474 tpm, fmt_buffer, rpt_max, &irb);
3475 if (rc && rc != -EIO)
3479 * If our first attempt with transport mode enabled comes back
3480 * with an incorrect length error, we're going to retry the
3481 * check with command mode.
3483 if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3485 rc = dasd_eckd_format_process_data(base, &cdata->expect,
3487 fmt_buffer, rpt_max,
3496 dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3505 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3507 if (cqr->retries < 0) {
3508 cqr->status = DASD_CQR_FAILED;
3511 cqr->status = DASD_CQR_FILLED;
3512 if (cqr->block && (cqr->startdev != cqr->block->base)) {
3513 dasd_eckd_reset_ccw_to_base_io(cqr);
3514 cqr->startdev = cqr->block->base;
3515 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3519 static dasd_erp_fn_t
3520 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3522 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3523 struct ccw_device *cdev = device->cdev;
3525 switch (cdev->id.cu_type) {
3530 return dasd_3990_erp_action;
3534 return dasd_default_erp_action;
3538 static dasd_erp_fn_t
3539 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3541 return dasd_default_erp_postaction;
3544 static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3545 struct dasd_ccw_req *cqr,
3550 struct dasd_eckd_private *private = device->private;
3552 /* first of all check for state change pending interrupt */
3553 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3554 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3556 * for alias only, not in offline processing
3557 * and only if not suspended
3559 if (!device->block && private->lcu &&
3560 device->state == DASD_STATE_ONLINE &&
3561 !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3562 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3563 /* schedule worker to reload device */
3564 dasd_reload_device(device);
3566 dasd_generic_handle_state_change(device);
3570 sense = dasd_get_sense(irb);
3574 /* summary unit check */
3575 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3576 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3577 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3578 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3579 "eckd suc: device already notified");
3582 sense = dasd_get_sense(irb);
3584 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3585 "eckd suc: no reason code available");
3586 clear_bit(DASD_FLAG_SUC, &device->flags);
3590 private->suc_reason = sense[8];
3591 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3592 "eckd handle summary unit check: reason",
3593 private->suc_reason);
3594 dasd_get_device(device);
3595 if (!schedule_work(&device->suc_work))
3596 dasd_put_device(device);
3601 /* service information message SIM */
3602 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3603 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3604 dasd_3990_erp_handle_sim(device, sense);
3608 /* loss of device reservation is handled via base devices only
3609 * as alias devices may be used with several bases
3611 if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3612 (sense[7] == 0x3F) &&
3613 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3614 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3615 if (device->features & DASD_FEATURE_FAILONSLCK)
3616 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3617 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3618 dev_err(&device->cdev->dev,
3619 "The device reservation was lost\n");
3623 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3624 unsigned int first_trk,
3625 unsigned int last_trk)
3627 struct dasd_eckd_private *private = device->private;
3628 unsigned int trks_per_vol;
3631 trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3633 if (first_trk >= trks_per_vol) {
3634 dev_warn(&device->cdev->dev,
3635 "Start track number %u used in the space release command is too big\n",
3638 } else if (last_trk >= trks_per_vol) {
3639 dev_warn(&device->cdev->dev,
3640 "Stop track number %u used in the space release command is too big\n",
3643 } else if (first_trk > last_trk) {
3644 dev_warn(&device->cdev->dev,
3645 "Start track %u used in the space release command exceeds the end track\n",
* Helper function to count the number of extents involved in a given range
3654 * with extent alignment in mind.
3656 static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
3665 /* Count first partial extent */
3666 if (from % trks_per_ext != 0) {
3667 tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
3670 cur_pos = tmp - from + 1;
3673 /* Count full extents */
3674 if (to - (from + cur_pos) + 1 >= trks_per_ext) {
3675 tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
3676 count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
3679 /* Count last partial extent */
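/*
 * Worked example for count_exts() (illustrative): with trks_per_ext = 30,
 * from = 25 and to = 95, the first partial extent covers tracks 25-29,
 * tracks 30-89 form two full extents and tracks 90-95 a last partial
 * extent, so the function returns 4.
 */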
3687 * Release allocated space for a given range or an entire volume.
3689 static struct dasd_ccw_req *
3690 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3691 struct request *req, unsigned int first_trk,
3692 unsigned int last_trk, int by_extent)
3694 struct dasd_eckd_private *private = device->private;
3695 struct dasd_dso_ras_ext_range *ras_range;
3696 struct dasd_rssd_features *features;
3697 struct dasd_dso_ras_data *ras_data;
3698 u16 heads, beg_head, end_head;
3699 int cur_to_trk, cur_from_trk;
3700 struct dasd_ccw_req *cqr;
3701 u32 beg_cyl, end_cyl;
3710 if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
3711 return ERR_PTR(-EINVAL);
3713 rq = req ? blk_mq_rq_to_pdu(req) : NULL;
3715 features = &private->features;
3717 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
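/*
 * Illustrative: with an extent size of, say, 21 cylinders on a volume
 * with 15 tracks per cylinder, trks_per_ext is 21 * 15 = 315 tracks.
 */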
3720 nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
3721 ras_size = sizeof(*ras_data);
3722 size = ras_size + (nr_exts * sizeof(*ras_range));
3724 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3726 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3727 "Could not allocate RAS request");
3731 ras_data = cqr->data;
3732 memset(ras_data, 0, size);
3734 ras_data->order = DSO_ORDER_RAS;
3735 ras_data->flags.vol_type = 0; /* CKD volume */
3736 /* Release specified extents or entire volume */
3737 ras_data->op_flags.by_extent = by_extent;
3739 * This bit guarantees initialisation of tracks within an extent that is
* not fully specified, but is only supported with a certain feature subset.
3743 ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
3744 ras_data->lss = private->ned->ID;
3745 ras_data->dev_addr = private->ned->unit_addr;
3746 ras_data->nr_exts = nr_exts;
3749 heads = private->rdc_data.trk_per_cyl;
3750 cur_from_trk = first_trk;
3751 cur_to_trk = first_trk + trks_per_ext -
3752 (first_trk % trks_per_ext) - 1;
3753 if (cur_to_trk > last_trk)
3754 cur_to_trk = last_trk;
3755 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3757 for (i = 0; i < nr_exts; i++) {
3758 beg_cyl = cur_from_trk / heads;
3759 beg_head = cur_from_trk % heads;
3760 end_cyl = cur_to_trk / heads;
3761 end_head = cur_to_trk % heads;
3763 set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
3764 set_ch_t(&ras_range->end_ext, end_cyl, end_head);
3766 cur_from_trk = cur_to_trk + 1;
3767 cur_to_trk = cur_from_trk + trks_per_ext - 1;
3768 if (cur_to_trk > last_trk)
3769 cur_to_trk = last_trk;
3775 ccw->cda = (__u32)(addr_t)cqr->data;
3776 ccw->cmd_code = DASD_ECKD_CCW_DSO;
3779 cqr->startdev = device;
3780 cqr->memdev = device;
3783 cqr->expires = device->default_expires * HZ;
3784 cqr->buildclk = get_tod_clock();
3785 cqr->status = DASD_CQR_FILLED;
3790 static int dasd_eckd_release_space_full(struct dasd_device *device)
3792 struct dasd_ccw_req *cqr;
3795 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3797 return PTR_ERR(cqr);
3799 rc = dasd_sleep_on_interruptible(cqr);
3801 dasd_sfree_request(cqr, cqr->memdev);
3806 static int dasd_eckd_release_space_trks(struct dasd_device *device,
3807 unsigned int from, unsigned int to)
3809 struct dasd_eckd_private *private = device->private;
3810 struct dasd_block *block = device->block;
3811 struct dasd_ccw_req *cqr, *n;
3812 struct list_head ras_queue;
3813 unsigned int device_exts;
3820 INIT_LIST_HEAD(&ras_queue);
3822 device_exts = private->real_cyl / dasd_eckd_ext_size(device);
3823 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3825 /* Make sure device limits are not exceeded */
3826 step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
3831 while (cur_pos < to) {
3832 stop = cur_pos + step -
3833 ((cur_pos + step) % trks_per_ext) - 1;
3837 cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3840 if (rc == -ENOMEM) {
3841 if (list_empty(&ras_queue))
3849 spin_lock_irq(&block->queue_lock);
3850 list_add_tail(&cqr->blocklist, &ras_queue);
3851 spin_unlock_irq(&block->queue_lock);
3855 rc = dasd_sleep_on_queue_interruptible(&ras_queue);
3858 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3859 device = cqr->startdev;
3860 private = device->private;
3862 spin_lock_irq(&block->queue_lock);
3863 list_del_init(&cqr->blocklist);
3864 spin_unlock_irq(&block->queue_lock);
3865 dasd_sfree_request(cqr, device);
3874 static int dasd_eckd_release_space(struct dasd_device *device,
3875 struct format_data_t *rdata)
3877 if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3878 return dasd_eckd_release_space_full(device);
3879 else if (rdata->intensity == 0)
3880 return dasd_eckd_release_space_trks(device, rdata->start_unit,
3886 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3887 struct dasd_device *startdev,
3888 struct dasd_block *block,
3889 struct request *req,
3894 unsigned int first_offs,
3895 unsigned int last_offs,
3896 unsigned int blk_per_trk,
3897 unsigned int blksize)
3899 struct dasd_eckd_private *private;
3900 unsigned long *idaws;
3901 struct LO_eckd_data *LO_data;
3902 struct dasd_ccw_req *cqr;
3904 struct req_iterator iter;
3908 int count, cidaw, cplength, datasize;
3910 unsigned char cmd, rcmd;
3912 struct dasd_device *basedev;
3914 basedev = block->base;
3915 private = basedev->private;
3916 if (rq_data_dir(req) == READ)
3917 cmd = DASD_ECKD_CCW_READ_MT;
3918 else if (rq_data_dir(req) == WRITE)
3919 cmd = DASD_ECKD_CCW_WRITE_MT;
3921 return ERR_PTR(-EINVAL);
3923 /* Check struct bio and count the number of blocks for the request. */
3926 rq_for_each_segment(bv, req, iter) {
3927 if (bv.bv_len & (blksize - 1))
3928 /* Eckd can only do full blocks. */
3929 return ERR_PTR(-EINVAL);
3930 count += bv.bv_len >> (block->s2b_shift + 9);
3931 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
3932 cidaw += bv.bv_len >> (block->s2b_shift + 9);
3935 if (count != last_rec - first_rec + 1)
3936 return ERR_PTR(-EINVAL);
3938 /* use the prefix command if available */
3939 use_prefix = private->features.feature[8] & 0x01;
3941 /* 1x prefix + number of blocks */
3942 cplength = 2 + count;
3943 /* 1x prefix + cidaws*sizeof(long) */
3944 datasize = sizeof(struct PFX_eckd_data) +
3945 sizeof(struct LO_eckd_data) +
3946 cidaw * sizeof(unsigned long);
3948 /* 1x define extent + 1x locate record + number of blocks */
3949 cplength = 2 + count;
3950 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
3951 datasize = sizeof(struct DE_eckd_data) +
3952 sizeof(struct LO_eckd_data) +
3953 cidaw * sizeof(unsigned long);
3955 /* Find out the number of additional locate record ccws for cdl. */
3956 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
3957 if (last_rec >= 2*blk_per_trk)
3958 count = 2*blk_per_trk - first_rec;
3960 datasize += count*sizeof(struct LO_eckd_data);
3962 /* Allocate the ccw request. */
3963 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3964 startdev, blk_mq_rq_to_pdu(req));
3968 /* First ccw is define extent or prefix. */
3970 if (prefix(ccw++, cqr->data, first_trk,
3971 last_trk, cmd, basedev, startdev) == -EAGAIN) {
3972 /* Clock not in sync and XRC is enabled.
3975 dasd_sfree_request(cqr, startdev);
3976 return ERR_PTR(-EAGAIN);
3978 idaws = (unsigned long *) (cqr->data +
3979 sizeof(struct PFX_eckd_data));
3981 if (define_extent(ccw++, cqr->data, first_trk,
3982 last_trk, cmd, basedev, 0) == -EAGAIN) {
3983 /* Clock not in sync and XRC is enabled.
3986 dasd_sfree_request(cqr, startdev);
3987 return ERR_PTR(-EAGAIN);
3989 idaws = (unsigned long *) (cqr->data +
3990 sizeof(struct DE_eckd_data));
3992 /* Build locate_record+read/write/ccws. */
3993 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
3995 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
3996 /* Only standard blocks so there is just one locate record. */
3997 ccw[-1].flags |= CCW_FLAG_CC;
3998 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
3999 last_rec - recid + 1, cmd, basedev, blksize);
4001 rq_for_each_segment(bv, req, iter) {
4002 dst = page_address(bv.bv_page) + bv.bv_offset;
4003 if (dasd_page_cache) {
4004 char *copy = kmem_cache_alloc(dasd_page_cache,
4005 GFP_DMA | __GFP_NOWARN);
4006 if (copy && rq_data_dir(req) == WRITE)
4007 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
4009 dst = copy + bv.bv_offset;
4011 for (off = 0; off < bv.bv_len; off += blksize) {
4012 sector_t trkid = recid;
4013 unsigned int recoffs = sector_div(trkid, blk_per_trk);
4016 /* Locate record for cdl special block ? */
4017 if (private->uses_cdl && recid < 2*blk_per_trk) {
4018 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
4020 count = dasd_eckd_cdl_reclen(recid);
4021 if (count < blksize &&
4022 rq_data_dir(req) == READ)
4023 memset(dst + count, 0xe5,
4026 ccw[-1].flags |= CCW_FLAG_CC;
4027 locate_record(ccw++, LO_data++,
4029 1, rcmd, basedev, count);
4031 /* Locate record for standard blocks ? */
4032 if (private->uses_cdl && recid == 2*blk_per_trk) {
4033 ccw[-1].flags |= CCW_FLAG_CC;
4034 locate_record(ccw++, LO_data++,
4036 last_rec - recid + 1,
4037 cmd, basedev, count);
4039 /* Read/write ccw. */
4040 ccw[-1].flags |= CCW_FLAG_CC;
4041 ccw->cmd_code = rcmd;
4043 if (idal_is_needed(dst, blksize)) {
4044 ccw->cda = (__u32)(addr_t) idaws;
4045 ccw->flags = CCW_FLAG_IDA;
4046 idaws = idal_create_words(idaws, dst, blksize);
4048 ccw->cda = (__u32)(addr_t) dst;
4056 if (blk_noretry_request(req) ||
4057 block->base->features & DASD_FEATURE_FAILFAST)
4058 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4059 cqr->startdev = startdev;
4060 cqr->memdev = startdev;
4062 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4063 cqr->lpm = dasd_path_get_ppm(startdev);
4064 cqr->retries = startdev->default_retries;
4065 cqr->buildclk = get_tod_clock();
4066 cqr->status = DASD_CQR_FILLED;
4068 /* Set flags to suppress output for expected errors */
4069 if (dasd_eckd_is_ese(basedev)) {
4070 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4071 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4072 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4078 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
4079 struct dasd_device *startdev,
4080 struct dasd_block *block,
4081 struct request *req,
4086 unsigned int first_offs,
4087 unsigned int last_offs,
4088 unsigned int blk_per_trk,
4089 unsigned int blksize)
4091 unsigned long *idaws;
4092 struct dasd_ccw_req *cqr;
4094 struct req_iterator iter;
4096 char *dst, *idaw_dst;
4097 unsigned int cidaw, cplength, datasize;
4101 struct dasd_device *basedev;
4102 unsigned int trkcount, count, count_to_trk_end;
4103 unsigned int idaw_len, seg_len, part_len, len_to_track_end;
4104 unsigned char new_track, end_idaw;
4106 unsigned int recoffs;
4108 basedev = block->base;
4109 if (rq_data_dir(req) == READ)
4110 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4111 else if (rq_data_dir(req) == WRITE)
4112 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4114 return ERR_PTR(-EINVAL);
4116 /* Track based I/O needs IDAWs for each page, and not just for
4117 * 64 bit addresses. We need additional idals for pages
4118 * that get filled from two tracks, so we use the number
* of records as an upper limit.
4121 cidaw = last_rec - first_rec + 1;
4122 trkcount = last_trk - first_trk + 1;
4124 /* 1x prefix + one read/write ccw per track */
4125 cplength = 1 + trkcount;
4127 datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
4129 /* Allocate the ccw request. */
4130 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4131 startdev, blk_mq_rq_to_pdu(req));
4135 /* transfer length factor: how many bytes to read from the last track */
4136 if (first_trk == last_trk)
4137 tlf = last_offs - first_offs + 1;
4139 tlf = last_offs + 1;
4142 if (prefix_LRE(ccw++, cqr->data, first_trk,
4143 last_trk, cmd, basedev, startdev,
4144 1 /* format */, first_offs + 1,
4147 /* Clock not in sync and XRC is enabled.
4150 dasd_sfree_request(cqr, startdev);
4151 return ERR_PTR(-EAGAIN);
4155 * The translation of request into ccw programs must meet the
4156 * following conditions:
4157 * - all idaws but the first and the last must address full pages
4158 * (or 2K blocks on 31-bit)
* - the scope of a ccw and its idal ends at the track boundaries
4161 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
4165 len_to_track_end = 0;
4168 rq_for_each_segment(bv, req, iter) {
4169 dst = page_address(bv.bv_page) + bv.bv_offset;
4170 seg_len = bv.bv_len;
4174 recoffs = sector_div(trkid, blk_per_trk);
4175 count_to_trk_end = blk_per_trk - recoffs;
4176 count = min((last_rec - recid + 1),
4177 (sector_t)count_to_trk_end);
4178 len_to_track_end = count * blksize;
4179 ccw[-1].flags |= CCW_FLAG_CC;
4180 ccw->cmd_code = cmd;
4181 ccw->count = len_to_track_end;
4182 ccw->cda = (__u32)(addr_t)idaws;
4183 ccw->flags = CCW_FLAG_IDA;
4187 /* first idaw for a ccw may start anywhere */
4191 /* If we start a new idaw, we must make sure that it
4192 * starts on an IDA_BLOCK_SIZE boundary.
4193 * If we continue an idaw, we must make sure that the
4194 * current segment begins where the so far accumulated
4198 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
4199 dasd_sfree_request(cqr, startdev);
4200 return ERR_PTR(-ERANGE);
4204 if ((idaw_dst + idaw_len) != dst) {
4205 dasd_sfree_request(cqr, startdev);
4206 return ERR_PTR(-ERANGE);
4208 part_len = min(seg_len, len_to_track_end);
4209 seg_len -= part_len;
4211 idaw_len += part_len;
4212 len_to_track_end -= part_len;
4213 /* collected memory area ends on an IDA_BLOCK border,
4215 * idal_create_words will handle cases where idaw_len
* is larger than IDA_BLOCK_SIZE
4218 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
4220 /* We also need to end the idaw at track end */
4221 if (!len_to_track_end) {
4226 idaws = idal_create_words(idaws, idaw_dst,
4235 if (blk_noretry_request(req) ||
4236 block->base->features & DASD_FEATURE_FAILFAST)
4237 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4238 cqr->startdev = startdev;
4239 cqr->memdev = startdev;
4241 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4242 cqr->lpm = dasd_path_get_ppm(startdev);
4243 cqr->retries = startdev->default_retries;
4244 cqr->buildclk = get_tod_clock();
4245 cqr->status = DASD_CQR_FILLED;
4247 /* Set flags to suppress output for expected errors */
4248 if (dasd_eckd_is_ese(basedev))
4249 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4254 static int prepare_itcw(struct itcw *itcw,
4255 unsigned int trk, unsigned int totrk, int cmd,
4256 struct dasd_device *basedev,
4257 struct dasd_device *startdev,
4258 unsigned int rec_on_trk, int count,
4259 unsigned int blksize,
4260 unsigned int total_data_size,
4262 unsigned int blk_per_trk)
4264 struct PFX_eckd_data pfxdata;
4265 struct dasd_eckd_private *basepriv, *startpriv;
4266 struct DE_eckd_data *dedata;
4267 struct LRE_eckd_data *lredata;
4271 u16 heads, beghead, endhead;
4279 /* setup prefix data */
4280 basepriv = basedev->private;
4281 startpriv = startdev->private;
4282 dedata = &pfxdata.define_extent;
4283 lredata = &pfxdata.locate_record;
4285 memset(&pfxdata, 0, sizeof(pfxdata));
4286 pfxdata.format = 1; /* PFX with LRE */
4287 pfxdata.base_address = basepriv->ned->unit_addr;
4288 pfxdata.base_lss = basepriv->ned->ID;
4289 pfxdata.validity.define_extent = 1;
4291 /* private uid is kept up to date, conf_data may be outdated */
4292 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
4293 pfxdata.validity.verify_base = 1;
4295 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
4296 pfxdata.validity.verify_base = 1;
4297 pfxdata.validity.hyper_pav = 1;
4301 case DASD_ECKD_CCW_READ_TRACK_DATA:
4302 dedata->mask.perm = 0x1;
4303 dedata->attributes.operation = basepriv->attrib.operation;
4304 dedata->blk_size = blksize;
4305 dedata->ga_extended |= 0x42;
4306 lredata->operation.orientation = 0x0;
4307 lredata->operation.operation = 0x0C;
4308 lredata->auxiliary.check_bytes = 0x01;
4309 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4311 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
4312 dedata->mask.perm = 0x02;
4313 dedata->attributes.operation = basepriv->attrib.operation;
4314 dedata->blk_size = blksize;
4315 rc = set_timestamp(NULL, dedata, basedev);
4316 dedata->ga_extended |= 0x42;
4317 lredata->operation.orientation = 0x0;
4318 lredata->operation.operation = 0x3F;
4319 lredata->extended_operation = 0x23;
4320 lredata->auxiliary.check_bytes = 0x2;
4322 * If XRC is supported the System Time Stamp is set. The
4323 * validity of the time stamp must be reflected in the prefix
4326 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
4327 pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
4328 pfx_cmd = DASD_ECKD_CCW_PFX;
4330 case DASD_ECKD_CCW_READ_COUNT_MT:
4331 dedata->mask.perm = 0x1;
4332 dedata->attributes.operation = DASD_BYPASS_CACHE;
4333 dedata->ga_extended |= 0x42;
4334 dedata->blk_size = blksize;
4335 lredata->operation.orientation = 0x2;
4336 lredata->operation.operation = 0x16;
4337 lredata->auxiliary.check_bytes = 0x01;
4338 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4341 DBF_DEV_EVENT(DBF_ERR, basedev,
4342 "prepare itcw, unknown opcode 0x%x", cmd);
4349 dedata->attributes.mode = 0x3; /* ECKD */
4351 heads = basepriv->rdc_data.trk_per_cyl;
4352 begcyl = trk / heads;
4353 beghead = trk % heads;
4354 endcyl = totrk / heads;
4355 endhead = totrk % heads;
4357 /* check for sequential prestage - enhance cylinder range */
4358 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
4359 dedata->attributes.operation == DASD_SEQ_ACCESS) {
4361 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
4362 endcyl += basepriv->attrib.nr_cyl;
4364 endcyl = (basepriv->real_cyl - 1);
4367 set_ch_t(&dedata->beg_ext, begcyl, beghead);
4368 set_ch_t(&dedata->end_ext, endcyl, endhead);
4370 dedata->ep_format = 0x20; /* records per track is valid */
4371 dedata->ep_rec_per_track = blk_per_trk;
4374 switch (basepriv->rdc_data.dev_type) {
4376 dn = ceil_quot(blksize + 6, 232);
4377 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
4378 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
4381 d = 7 + ceil_quot(blksize + 12, 32);
4382 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
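/*
 * Worked example (illustrative) for the first pair of formulas above
 * with blksize = 4096: dn = ceil_quot(4102, 232) = 18 and
 * d = 9 + ceil_quot(4096 + 6 * 19, 34) = 133, so rec_on_trk = 2 gives
 * sector = (49 + 1 * (10 + 133)) / 8 = 24.
 */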
4387 if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
4388 lredata->auxiliary.length_valid = 0;
4389 lredata->auxiliary.length_scope = 0;
4390 lredata->sector = 0xff;
4392 lredata->auxiliary.length_valid = 1;
4393 lredata->auxiliary.length_scope = 1;
4394 lredata->sector = sector;
4396 lredata->auxiliary.imbedded_ccw_valid = 1;
4397 lredata->length = tlf;
4398 lredata->imbedded_ccw = cmd;
4399 lredata->count = count;
4400 set_ch_t(&lredata->seek_addr, begcyl, beghead);
4401 lredata->search_arg.cyl = lredata->seek_addr.cyl;
4402 lredata->search_arg.head = lredata->seek_addr.head;
4403 lredata->search_arg.record = rec_on_trk;
4405 dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
4406 &pfxdata, sizeof(pfxdata), total_data_size);
4407 return PTR_ERR_OR_ZERO(dcw);
4410 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
4411 struct dasd_device *startdev,
4412 struct dasd_block *block,
4413 struct request *req,
4418 unsigned int first_offs,
4419 unsigned int last_offs,
4420 unsigned int blk_per_trk,
4421 unsigned int blksize)
4423 struct dasd_ccw_req *cqr;
4424 struct req_iterator iter;
4427 unsigned int trkcount, ctidaw;
4429 struct dasd_device *basedev;
4432 struct tidaw *last_tidaw = NULL;
4436 unsigned int seg_len, part_len, len_to_track_end;
4437 unsigned char new_track;
4438 sector_t recid, trkid;
4440 unsigned int count, count_to_trk_end;
4443 basedev = block->base;
4444 if (rq_data_dir(req) == READ) {
4445 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4446 itcw_op = ITCW_OP_READ;
4447 } else if (rq_data_dir(req) == WRITE) {
4448 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4449 itcw_op = ITCW_OP_WRITE;
4451 return ERR_PTR(-EINVAL);
/* track-based I/O needs to address all memory via TIDAWs,
4454 * not just for 64 bit addresses. This allows us to map
4455 * each segment directly to one tidaw.
4456 * In the case of write requests, additional tidaws may
4457 * be needed when a segment crosses a track boundary.
4459 trkcount = last_trk - first_trk + 1;
4461 rq_for_each_segment(bv, req, iter) {
4464 if (rq_data_dir(req) == WRITE)
4465 ctidaw += (last_trk - first_trk);
4467 /* Allocate the ccw request. */
4468 itcw_size = itcw_calc_size(0, ctidaw, 0);
4469 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4470 blk_mq_rq_to_pdu(req));
4474 /* transfer length factor: how many bytes to read from the last track */
4475 if (first_trk == last_trk)
4476 tlf = last_offs - first_offs + 1;
4478 tlf = last_offs + 1;
4481 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4486 cqr->cpaddr = itcw_get_tcw(itcw);
4487 if (prepare_itcw(itcw, first_trk, last_trk,
4488 cmd, basedev, startdev,
4491 (last_rec - first_rec + 1) * blksize,
4492 tlf, blk_per_trk) == -EAGAIN) {
4493 /* Clock not in sync and XRC is enabled.
4499 len_to_track_end = 0;
* A tidaw can address 4k of memory, but must not cross page boundaries.
4502 * We can let the block layer handle this by setting
4503 * blk_queue_segment_boundary to page boundaries and
4504 * blk_max_segment_size to page size when setting up the request queue.
4505 * For write requests, a TIDAW must not cross track boundaries, because
4506 * we have to set the CBC flag on the last tidaw for each track.
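* Illustrative consequence: a single segment that spans the end of one
* track and the start of the next is split into two tidaws below, and
* the one ending the track gets the CBC flag.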
4508 if (rq_data_dir(req) == WRITE) {
4511 rq_for_each_segment(bv, req, iter) {
4512 dst = page_address(bv.bv_page) + bv.bv_offset;
4513 seg_len = bv.bv_len;
4517 offs = sector_div(trkid, blk_per_trk);
4518 count_to_trk_end = blk_per_trk - offs;
4519 count = min((last_rec - recid + 1),
4520 (sector_t)count_to_trk_end);
4521 len_to_track_end = count * blksize;
4525 part_len = min(seg_len, len_to_track_end);
4526 seg_len -= part_len;
4527 len_to_track_end -= part_len;
4528 /* We need to end the tidaw at track end */
4529 if (!len_to_track_end) {
4531 tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4534 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4536 if (IS_ERR(last_tidaw)) {
4544 rq_for_each_segment(bv, req, iter) {
4545 dst = page_address(bv.bv_page) + bv.bv_offset;
4546 last_tidaw = itcw_add_tidaw(itcw, 0x00,
4548 if (IS_ERR(last_tidaw)) {
4554 last_tidaw->flags |= TIDAW_FLAGS_LAST;
4555 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
4556 itcw_finalize(itcw);
4558 if (blk_noretry_request(req) ||
4559 block->base->features & DASD_FEATURE_FAILFAST)
4560 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4562 cqr->startdev = startdev;
4563 cqr->memdev = startdev;
4565 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4566 cqr->lpm = dasd_path_get_ppm(startdev);
4567 cqr->retries = startdev->default_retries;
4568 cqr->buildclk = get_tod_clock();
4569 cqr->status = DASD_CQR_FILLED;
4571 /* Set flags to suppress output for expected errors */
4572 if (dasd_eckd_is_ese(basedev)) {
4573 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4574 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4575 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4580 dasd_sfree_request(cqr, startdev);
4581 return ERR_PTR(ret);
4584 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4585 struct dasd_block *block,
4586 struct request *req)
4591 struct dasd_eckd_private *private;
4592 struct dasd_device *basedev;
4593 sector_t first_rec, last_rec;
4594 sector_t first_trk, last_trk;
4595 unsigned int first_offs, last_offs;
4596 unsigned int blk_per_trk, blksize;
4598 unsigned int data_size;
4599 struct dasd_ccw_req *cqr;
4601 basedev = block->base;
4602 private = basedev->private;
4604 /* Calculate number of blocks/records per track. */
4605 blksize = block->bp_block;
4606 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4607 if (blk_per_trk == 0)
4608 return ERR_PTR(-EINVAL);
4609 /* Calculate record id of first and last block. */
4610 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4611 first_offs = sector_div(first_trk, blk_per_trk);
4612 last_rec = last_trk =
4613 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4614 last_offs = sector_div(last_trk, blk_per_trk);
4615 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
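/*
 * With the compatible disk layout (CDL) the first two tracks of a volume
 * hold records with special sizes, so requests touching them (cdlspecial)
 * are built with the command mode single-request path below.
 */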
4617 fcx_multitrack = private->features.feature[40] & 0x20;
4618 data_size = blk_rq_bytes(req);
4619 if (data_size % blksize)
4620 return ERR_PTR(-EINVAL);
4621 /* tpm write requests add CBC data on each track boundary */
4622 if (rq_data_dir(req) == WRITE)
4623 data_size += (last_trk - first_trk) * 4;
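/* e.g. a write spanning 4 tracks crosses 3 boundaries: 3 * 4 = 12 extra bytes */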
4625 /* are read track data and write track data in command mode supported? */
4626 cmdrtd = private->features.feature[9] & 0x20;
4627 cmdwtd = private->features.feature[12] & 0x40;
4628 use_prefix = private->features.feature[8] & 0x01;
4631 if (cdlspecial || dasd_page_cache) {
4632 /* do nothing, just fall through to the cmd mode single case */
4633 } else if ((data_size <= private->fcx_max_data)
4634 && (fcx_multitrack || (first_trk == last_trk))) {
4635 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4636 first_rec, last_rec,
4637 first_trk, last_trk,
4638 first_offs, last_offs,
4639 blk_per_trk, blksize);
4640 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4641 (PTR_ERR(cqr) != -ENOMEM))
4643 } else if (use_prefix &&
4644 (((rq_data_dir(req) == READ) && cmdrtd) ||
4645 ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4646 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4647 first_rec, last_rec,
4648 first_trk, last_trk,
4649 first_offs, last_offs,
4650 blk_per_trk, blksize);
4651 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4652 (PTR_ERR(cqr) != -ENOMEM))
4656 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4657 first_rec, last_rec,
4658 first_trk, last_trk,
4659 first_offs, last_offs,
4660 blk_per_trk, blksize);
4664 static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
4665 struct dasd_block *block,
4666 struct request *req)
4668 sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
4669 unsigned int seg_len, len_to_track_end;
4670 unsigned int cidaw, cplength, datasize;
4671 sector_t first_trk, last_trk, sectors;
4672 struct dasd_eckd_private *base_priv;
4673 struct dasd_device *basedev;
4674 struct req_iterator iter;
4675 struct dasd_ccw_req *cqr;
4676 unsigned int first_offs;
4677 unsigned int trkcount;
4678 unsigned long *idaws;
4688 * raw track access needs to be a multiple of 64k and on a 64k boundary
4689 * For read requests we can fix an incorrect alignment by padding
4690 * the request with dummy pages.
4692 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
4693 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
4694 DASD_RAW_SECTORS_PER_TRACK;
4695 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
4696 DASD_RAW_SECTORS_PER_TRACK;
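/*
 * Hypothetical example: a read at sector 130 for 120 sectors gives
 * start_padding_sectors = 130 % 128 = 2, end_sector_offset = 250 % 128 = 122
 * and end_padding_sectors = (128 - 122) % 128 = 6, i.e. 2 dummy sectors are
 * read in front of the data and 6 after it.
 */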
4697 basedev = block->base;
4698 if ((start_padding_sectors || end_padding_sectors) &&
4699 (rq_data_dir(req) == WRITE)) {
4700 DBF_DEV_EVENT(DBF_ERR, basedev,
4701 "raw write not track aligned (%llu,%llu) req %p",
4702 start_padding_sectors, end_padding_sectors, req);
4703 return ERR_PTR(-EINVAL);
4706 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
4707 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
4708 DASD_RAW_SECTORS_PER_TRACK;
4709 trkcount = last_trk - first_trk + 1;
4712 if (rq_data_dir(req) == READ)
4713 cmd = DASD_ECKD_CCW_READ_TRACK;
4714 else if (rq_data_dir(req) == WRITE)
4715 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
4717 return ERR_PTR(-EINVAL);
4720 * Raw track based I/O needs IDAWs for each page,
4721 * and not just for 64 bit addresses.
4723 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
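/* e.g. a request spanning 3 tracks needs 3 * 16 = 48 idaws, one per 4k page */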
4726 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
4727 * of extended parameter. This is needed for write full track.
4729 base_priv = basedev->private;
4730 use_prefix = base_priv->features.feature[8] & 0x01;
4732 cplength = 1 + trkcount;
4733 size = sizeof(struct PFX_eckd_data) + 2;
4735 cplength = 2 + trkcount;
4736 size = sizeof(struct DE_eckd_data) +
4737 sizeof(struct LRE_eckd_data) + 2;
4739 size = ALIGN(size, 8);
4741 datasize = size + cidaw * sizeof(unsigned long);
4743 /* Allocate the ccw request. */
4744 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
4745 datasize, startdev, blk_mq_rq_to_pdu(req));
4753 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
4754 startdev, 1, first_offs + 1, trkcount, 0, 0);
4756 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
4757 ccw[-1].flags |= CCW_FLAG_CC;
4759 data += sizeof(struct DE_eckd_data);
4760 locate_record_ext(ccw++, data, first_trk, first_offs + 1,
4761 trkcount, cmd, basedev, 0, 0);
4764 idaws = (unsigned long *)(cqr->data + size);
4765 len_to_track_end = 0;
4766 if (start_padding_sectors) {
4767 ccw[-1].flags |= CCW_FLAG_CC;
4768 ccw->cmd_code = cmd;
4769 /* maximum 3390 track size */
4771 /* 64k maps to one track */
4772 len_to_track_end = 65536 - start_padding_sectors * 512;
4773 ccw->cda = (__u32)(addr_t)idaws;
4774 ccw->flags |= CCW_FLAG_IDA;
4775 ccw->flags |= CCW_FLAG_SLI;
4777 for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
4778 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
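/* each idal_create_words() call covers one page of rawpadpage, i.e. 8 dummy sectors */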
4780 rq_for_each_segment(bv, req, iter) {
4781 dst = page_address(bv.bv_page) + bv.bv_offset;
4782 seg_len = bv.bv_len;
4783 if (cmd == DASD_ECKD_CCW_READ_TRACK)
4784 memset(dst, 0, seg_len);
4785 if (!len_to_track_end) {
4786 ccw[-1].flags |= CCW_FLAG_CC;
4787 ccw->cmd_code = cmd;
4788 /* maximum 3390 track size */
4790 /* 64k maps to one track */
4791 len_to_track_end = 65536;
4792 ccw->cda = (__u32)(addr_t)idaws;
4793 ccw->flags |= CCW_FLAG_IDA;
4794 ccw->flags |= CCW_FLAG_SLI;
4797 len_to_track_end -= seg_len;
4798 idaws = idal_create_words(idaws, dst, seg_len);
4800 for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
4801 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4802 if (blk_noretry_request(req) ||
4803 block->base->features & DASD_FEATURE_FAILFAST)
4804 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4805 cqr->startdev = startdev;
4806 cqr->memdev = startdev;
4808 cqr->expires = startdev->default_expires * HZ;
4809 cqr->lpm = dasd_path_get_ppm(startdev);
4810 cqr->retries = startdev->default_retries;
4811 cqr->buildclk = get_tod_clock();
4812 cqr->status = DASD_CQR_FILLED;
4819 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4821 struct dasd_eckd_private *private;
4823 struct req_iterator iter;
4826 unsigned int blksize, blk_per_trk, off;
4830 if (!dasd_page_cache)
4832 private = cqr->block->base->private;
4833 blksize = cqr->block->bp_block;
4834 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4835 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4837 /* Skip over define extent & locate record. */
4839 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
4841 rq_for_each_segment(bv, req, iter) {
4842 dst = page_address(bv.bv_page) + bv.bv_offset;
4843 for (off = 0; off < bv.bv_len; off += blksize) {
4844 /* Skip locate record. */
4845 if (private->uses_cdl && recid <= 2*blk_per_trk)
4848 if (ccw->flags & CCW_FLAG_IDA)
4849 cda = *((char **)((addr_t) ccw->cda));
4851 cda = (char *)((addr_t) ccw->cda);
4853 if (rq_data_dir(req) == READ)
4854 memcpy(dst, cda, bv.bv_len);
4855 kmem_cache_free(dasd_page_cache,
4856 (void *)((addr_t)cda & PAGE_MASK));
4865 status = cqr->status == DASD_CQR_DONE;
4866 dasd_sfree_request(cqr, cqr->memdev);
4871 * Modify ccw/tcw in cqr so it can be started on a base device.
4873 * Note that this is not enough to restart the cqr!
4874 * Either reset cqr->startdev as well (summary unit check handling)
4875 * or restart via separate cqr (as in ERP handling).
4877 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4880 struct PFX_eckd_data *pfxdata;
4885 if (cqr->cpmode == 1) {
4887 tccb = tcw_get_tccb(tcw);
4888 dcw = (struct dcw *)&tccb->tca[0];
4889 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4890 pfxdata->validity.verify_base = 0;
4891 pfxdata->validity.hyper_pav = 0;
4894 pfxdata = cqr->data;
4895 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4896 pfxdata->validity.verify_base = 0;
4897 pfxdata->validity.hyper_pav = 0;
4902 #define DASD_ECKD_CHANQ_MAX_SIZE 4
4904 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4905 struct dasd_block *block,
4906 struct request *req)
4908 struct dasd_eckd_private *private;
4909 struct dasd_device *startdev;
4910 unsigned long flags;
4911 struct dasd_ccw_req *cqr;
4913 startdev = dasd_alias_get_start_dev(base);
4916 private = startdev->private;
4917 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4918 return ERR_PTR(-EBUSY);
4920 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4922 if ((base->features & DASD_FEATURE_USERAW))
4923 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4925 cqr = dasd_eckd_build_cp(startdev, block, req);
4928 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4932 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4933 struct request *req)
4935 struct dasd_eckd_private *private;
4936 unsigned long flags;
4938 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4939 private = cqr->memdev->private;
4941 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4942 return dasd_eckd_free_cp(cqr, req);
4946 dasd_eckd_fill_info(struct dasd_device * device,
4947 struct dasd_information2_t * info)
4949 struct dasd_eckd_private *private = device->private;
4951 info->label_block = 2;
4952 info->FBA_layout = private->uses_cdl ? 0 : 1;
4953 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4954 info->characteristics_size = sizeof(private->rdc_data);
4955 memcpy(info->characteristics, &private->rdc_data,
4956 sizeof(private->rdc_data));
4957 info->confdata_size = min((unsigned long)private->conf_len,
4958 sizeof(info->configuration_data));
4959 memcpy(info->configuration_data, private->conf_data,
4960 info->confdata_size);
4965 * SECTION: ioctl functions for eckd devices.
4969 * Release device ioctl.
4970 * Builds a channel program to release a previously reserved
4971 * (see dasd_eckd_reserve) device.
4974 dasd_eckd_release(struct dasd_device *device)
4976 struct dasd_ccw_req *cqr;
4981 if (!capable(CAP_SYS_ADMIN))
4985 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4987 mutex_lock(&dasd_reserve_mutex);
4989 cqr = &dasd_reserve_req->cqr;
4990 memset(cqr, 0, sizeof(*cqr));
4991 memset(&dasd_reserve_req->ccw, 0,
4992 sizeof(dasd_reserve_req->ccw));
4993 cqr->cpaddr = &dasd_reserve_req->ccw;
4994 cqr->data = &dasd_reserve_req->data;
4995 cqr->magic = DASD_ECKD_MAGIC;
4998 ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
4999 ccw->flags |= CCW_FLAG_SLI;
5001 ccw->cda = (__u32)(addr_t) cqr->data;
5002 cqr->startdev = device;
5003 cqr->memdev = device;
5004 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5005 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5006 cqr->retries = 2; /* set retry counter to enable basic ERP */
5007 cqr->expires = 2 * HZ;
5008 cqr->buildclk = get_tod_clock();
5009 cqr->status = DASD_CQR_FILLED;
5011 rc = dasd_sleep_on_immediatly(cqr);
5013 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5016 mutex_unlock(&dasd_reserve_mutex);
5018 dasd_sfree_request(cqr, cqr->memdev);
5023 * Reserve device ioctl.
5024 * Options are set to 'synchronous wait for interrupt' and
5025 * 'timeout the request'. This leads to terminating the I/O if
5026 * the interrupt is outstanding for a certain time.
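 *
 * Minimal user-space sketch (assuming the BIODASDRSRV/BIODASDRLSE ioctl
 * numbers from <asm/dasd.h>):
 *
 *	int fd = open("/dev/dasdb", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, BIODASDRSRV) == 0) {
 *		... the device is now reserved to this host ...
 *		ioctl(fd, BIODASDRLSE);
 *	}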
5029 dasd_eckd_reserve(struct dasd_device *device)
5031 struct dasd_ccw_req *cqr;
5036 if (!capable(CAP_SYS_ADMIN))
5040 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5042 mutex_lock(&dasd_reserve_mutex);
5044 cqr = &dasd_reserve_req->cqr;
5045 memset(cqr, 0, sizeof(*cqr));
5046 memset(&dasd_reserve_req->ccw, 0,
5047 sizeof(dasd_reserve_req->ccw));
5048 cqr->cpaddr = &dasd_reserve_req->ccw;
5049 cqr->data = &dasd_reserve_req->data;
5050 cqr->magic = DASD_ECKD_MAGIC;
5053 ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
5054 ccw->flags |= CCW_FLAG_SLI;
5056 ccw->cda = (__u32)(addr_t) cqr->data;
5057 cqr->startdev = device;
5058 cqr->memdev = device;
5059 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5060 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5061 cqr->retries = 2; /* set retry counter to enable basic ERP */
5062 cqr->expires = 2 * HZ;
5063 cqr->buildclk = get_tod_clock();
5064 cqr->status = DASD_CQR_FILLED;
5066 rc = dasd_sleep_on_immediatly(cqr);
5068 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5071 mutex_unlock(&dasd_reserve_mutex);
5073 dasd_sfree_request(cqr, cqr->memdev);
5078 * Steal lock ioctl - unconditional reserve device.
5079 * Builds a channel program to break a device's reservation
5080 * (unconditional reserve).
5083 dasd_eckd_steal_lock(struct dasd_device *device)
5085 struct dasd_ccw_req *cqr;
5090 if (!capable(CAP_SYS_ADMIN))
5094 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5096 mutex_lock(&dasd_reserve_mutex);
5098 cqr = &dasd_reserve_req->cqr;
5099 memset(cqr, 0, sizeof(*cqr));
5100 memset(&dasd_reserve_req->ccw, 0,
5101 sizeof(dasd_reserve_req->ccw));
5102 cqr->cpaddr = &dasd_reserve_req->ccw;
5103 cqr->data = &dasd_reserve_req->data;
5104 cqr->magic = DASD_ECKD_MAGIC;
5107 ccw->cmd_code = DASD_ECKD_CCW_SLCK;
5108 ccw->flags |= CCW_FLAG_SLI;
5110 ccw->cda = (__u32)(addr_t) cqr->data;
5111 cqr->startdev = device;
5112 cqr->memdev = device;
5113 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5114 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5115 cqr->retries = 2; /* set retry counter to enable basic ERP */
5116 cqr->expires = 2 * HZ;
5117 cqr->buildclk = get_tod_clock();
5118 cqr->status = DASD_CQR_FILLED;
5120 rc = dasd_sleep_on_immediatly(cqr);
5122 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5125 mutex_unlock(&dasd_reserve_mutex);
5127 dasd_sfree_request(cqr, cqr->memdev);
5132 * SNID - Sense Path Group ID
5133 * This ioctl may be used in situations where I/O is stalled due to
5134 * a reserve, so if the normal dasd_smalloc_request fails, we use the
5135 * preallocated dasd_reserve_req.
5137 static int dasd_eckd_snid(struct dasd_device *device,
5140 struct dasd_ccw_req *cqr;
5144 struct dasd_snid_ioctl_data usrparm;
5146 if (!capable(CAP_SYS_ADMIN))
5149 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5153 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
5154 sizeof(struct dasd_snid_data), device,
5157 mutex_lock(&dasd_reserve_mutex);
5159 cqr = &dasd_reserve_req->cqr;
5160 memset(cqr, 0, sizeof(*cqr));
5161 memset(&dasd_reserve_req->ccw, 0,
5162 sizeof(dasd_reserve_req->ccw));
5163 cqr->cpaddr = &dasd_reserve_req->ccw;
5164 cqr->data = &dasd_reserve_req->data;
5165 cqr->magic = DASD_ECKD_MAGIC;
5168 ccw->cmd_code = DASD_ECKD_CCW_SNID;
5169 ccw->flags |= CCW_FLAG_SLI;
5171 ccw->cda = (__u32)(addr_t) cqr->data;
5172 cqr->startdev = device;
5173 cqr->memdev = device;
5174 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5175 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5176 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
5178 cqr->expires = 10 * HZ;
5179 cqr->buildclk = get_tod_clock();
5180 cqr->status = DASD_CQR_FILLED;
5181 cqr->lpm = usrparm.path_mask;
5183 rc = dasd_sleep_on_immediatly(cqr);
5184 /* verify that I/O processing didn't modify the path mask */
5185 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
5188 usrparm.data = *((struct dasd_snid_data *)cqr->data);
5189 if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
5194 mutex_unlock(&dasd_reserve_mutex);
5196 dasd_sfree_request(cqr, cqr->memdev);
5201 * Read performance statistics
5204 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
5206 struct dasd_psf_prssd_data *prssdp;
5207 struct dasd_rssd_perf_stats_t *stats;
5208 struct dasd_ccw_req *cqr;
5212 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5213 (sizeof(struct dasd_psf_prssd_data) +
5214 sizeof(struct dasd_rssd_perf_stats_t)),
5217 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5218 "Could not allocate initialization request");
5219 return PTR_ERR(cqr);
5221 cqr->startdev = device;
5222 cqr->memdev = device;
5224 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5225 cqr->expires = 10 * HZ;
5227 /* Prepare for Read Subsystem Data */
5228 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5229 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5230 prssdp->order = PSF_ORDER_PRSSD;
5231 prssdp->suborder = 0x01; /* Performance Statistics */
5232 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
5235 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5236 ccw->count = sizeof(struct dasd_psf_prssd_data);
5237 ccw->flags |= CCW_FLAG_CC;
5238 ccw->cda = (__u32)(addr_t) prssdp;
5240 /* Read Subsystem Data - Performance Statistics */
5241 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5242 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
5245 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5246 ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
5247 ccw->cda = (__u32)(addr_t) stats;
5249 cqr->buildclk = get_tod_clock();
5250 cqr->status = DASD_CQR_FILLED;
5251 rc = dasd_sleep_on(cqr);
5253 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5254 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5255 if (copy_to_user(argp, stats,
5256 sizeof(struct dasd_rssd_perf_stats_t)))
5259 dasd_sfree_request(cqr, cqr->memdev);
5264 * Get attributes (cache operations)
5265 * Returns the cache attributes used in Define Extent (DE).
5268 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5270 struct dasd_eckd_private *private = device->private;
5271 struct attrib_data_t attrib = private->attrib;
5274 if (!capable(CAP_SYS_ADMIN))
5280 if (copy_to_user(argp, (long *) &attrib,
5281 sizeof(struct attrib_data_t)))
5288 * Set attributes (cache operations)
5289 * Stores the attributes for cache operations to be used in Define Extent (DE).
5292 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5294 struct dasd_eckd_private *private = device->private;
5295 struct attrib_data_t attrib;
5297 if (!capable(CAP_SYS_ADMIN))
5302 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5304 private->attrib = attrib;
5306 dev_info(&device->cdev->dev,
5307 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5308 private->attrib.operation, private->attrib.nr_cyl);
5313 * Issue syscall I/O to EMC Symmetrix array.
5314 * CCWs are PSF and RSSD
5316 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
5318 struct dasd_symmio_parms usrparm;
5319 char *psf_data, *rssd_result;
5320 struct dasd_ccw_req *cqr;
5325 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
5329 /* Copy parms from caller */
5331 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5333 if (is_compat_task()) {
5334 /* Make sure pointers are sane even on 31 bit. */
5336 if ((usrparm.psf_data >> 32) != 0)
5338 if ((usrparm.rssd_result >> 32) != 0)
5340 usrparm.psf_data &= 0x7fffffffULL;
5341 usrparm.rssd_result &= 0x7fffffffULL;
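/*
 * The most significant bit of a 32-bit address is the 31-bit
 * addressing-mode bit, so masking with 0x7fffffff leaves the usable
 * 31-bit user address.
 */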
5343 /* at least 2 bytes are accessed and should be allocated */
5344 if (usrparm.psf_data_len < 2) {
5345 DBF_DEV_EVENT(DBF_WARNING, device,
5346 "Symmetrix ioctl invalid data length %d",
5347 usrparm.psf_data_len);
5351 /* alloc I/O data area */
5352 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
5353 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
5354 if (!psf_data || !rssd_result) {
5359 /* get syscall header from user space */
5361 if (copy_from_user(psf_data,
5362 (void __user *)(unsigned long) usrparm.psf_data,
5363 usrparm.psf_data_len))
5368 /* setup CCWs for PSF + RSSD */
5369 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
5371 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5372 "Could not allocate initialization request");
5377 cqr->startdev = device;
5378 cqr->memdev = device;
5380 cqr->expires = 10 * HZ;
5381 cqr->buildclk = get_tod_clock();
5382 cqr->status = DASD_CQR_FILLED;
5384 /* Build the ccws */
5388 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5389 ccw->count = usrparm.psf_data_len;
5390 ccw->flags |= CCW_FLAG_CC;
5391 ccw->cda = (__u32)(addr_t) psf_data;
5396 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5397 ccw->count = usrparm.rssd_result_len;
5398 ccw->flags = CCW_FLAG_SLI ;
5399 ccw->cda = (__u32)(addr_t) rssd_result;
5401 rc = dasd_sleep_on(cqr);
5406 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
5407 rssd_result, usrparm.rssd_result_len))
5412 dasd_sfree_request(cqr, cqr->memdev);
5417 DBF_DEV_EVENT(DBF_WARNING, device,
5418 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
5419 (int) psf0, (int) psf1, rc);
5424 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5426 struct dasd_device *device = block->base;
5430 return dasd_eckd_get_attrib(device, argp);
5432 return dasd_eckd_set_attrib(device, argp);
5434 return dasd_eckd_performance(device, argp);
5436 return dasd_eckd_release(device);
5438 return dasd_eckd_reserve(device);
5440 return dasd_eckd_steal_lock(device);
5442 return dasd_eckd_snid(device, argp);
5444 return dasd_symm_io(device, argp);
5451 * Dump the range of CCWs into 'page' buffer
5452 * and return number of printed chars.
5455 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
5461 while (from <= to) {
5462 len += sprintf(page + len, PRINTK_HEADER
5463 " CCW %p: %08X %08X DAT:",
5464 from, ((int *) from)[0], ((int *) from)[1]);
5466 /* get pointer to data (consider IDALs) */
5467 if (from->flags & CCW_FLAG_IDA)
5468 datap = (char *) *((addr_t *) (addr_t) from->cda);
5470 datap = (char *) ((addr_t) from->cda);
5472 /* dump data (max 32 bytes) */
5473 for (count = 0; count < from->count && count < 32; count++) {
5474 if (count % 8 == 0) len += sprintf(page + len, " ");
5475 if (count % 4 == 0) len += sprintf(page + len, " ");
5476 len += sprintf(page + len, "%02x", datap[count]);
5478 len += sprintf(page + len, "\n");
5485 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
5491 sense = (u64 *) dasd_get_sense(irb);
5492 stat = (u64 *) &irb->scsw;
5494 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
5495 "%016llx %016llx %016llx %016llx",
5496 reason, *stat, *((u32 *) (stat + 1)),
5497 sense[0], sense[1], sense[2], sense[3]);
5499 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
5500 reason, *stat, *((u32 *) (stat + 1)),
5506 * Print sense data and related channel program.
5507 * Parts are printed because printk buffer is only 1024 bytes.
5509 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
5510 struct dasd_ccw_req *req, struct irb *irb)
5513 struct ccw1 *first, *last, *fail, *from, *to;
5516 page = (char *) get_zeroed_page(GFP_ATOMIC);
5518 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5519 "No memory to dump sense data\n");
5522 /* dump the sense data */
5523 len = sprintf(page, PRINTK_HEADER
5524 " I/O status report for device %s:\n",
5525 dev_name(&device->cdev->dev));
5526 len += sprintf(page + len, PRINTK_HEADER
5527 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5529 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5530 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5531 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5532 req ? req->intrc : 0);
5533 len += sprintf(page + len, PRINTK_HEADER
5534 " device %s: Failing CCW: %p\n",
5535 dev_name(&device->cdev->dev),
5536 (void *) (addr_t) irb->scsw.cmd.cpa);
5537 if (irb->esw.esw0.erw.cons) {
5538 for (sl = 0; sl < 4; sl++) {
5539 len += sprintf(page + len, PRINTK_HEADER
5540 " Sense(hex) %2d-%2d:",
5541 (8 * sl), ((8 * sl) + 7));
5543 for (sct = 0; sct < 8; sct++) {
5544 len += sprintf(page + len, " %02x",
5545 irb->ecw[8 * sl + sct]);
5547 len += sprintf(page + len, "\n");
5550 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
5551 /* 24 Byte Sense Data */
5552 sprintf(page + len, PRINTK_HEADER
5553 " 24 Byte: %x MSG %x, "
5554 "%s MSGb to SYSOP\n",
5555 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
5556 irb->ecw[1] & 0x10 ? "" : "no");
5558 /* 32 Byte Sense Data */
5559 sprintf(page + len, PRINTK_HEADER
5560 " 32 Byte: Format: %x "
5561 "Exception class %x\n",
5562 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
5565 sprintf(page + len, PRINTK_HEADER
5566 " SORRY - NO VALID SENSE AVAILABLE\n");
5568 printk(KERN_ERR "%s", page);
5571 /* req == NULL for unsolicited interrupts */
5572 /* dump the Channel Program (max 140 Bytes per line) */
5573 /* Count CCWs and print first CCWs (maximum 1024 / 140 = 7) */
5574 first = req->cpaddr;
5575 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
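/* the channel program ends at the first CCW without command or data chaining */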
5576 to = min(first + 6, last);
5577 len = sprintf(page, PRINTK_HEADER
5578 " Related CP in req: %p\n", req);
5579 dasd_eckd_dump_ccw_range(first, to, page + len);
5580 printk(KERN_ERR "%s", page);
5582 /* print failing CCW area (maximum 4) */
5583 /* scsw->cda is either valid or zero */
5586 fail = (struct ccw1 *)(addr_t)
5587 irb->scsw.cmd.cpa; /* failing CCW */
5588 if (from < fail - 2) {
5589 from = fail - 2; /* there is a gap - print header */
5590 len += sprintf(page, PRINTK_HEADER "......\n");
5592 to = min(fail + 1, last);
5593 len += dasd_eckd_dump_ccw_range(from, to, page + len);
5595 /* print last CCWs (maximum 2) */
5596 from = max(from, ++to);
5597 if (from < last - 1) {
5598 from = last - 1; /* there is a gap - print header */
5599 len += sprintf(page + len, PRINTK_HEADER "......\n");
5601 len += dasd_eckd_dump_ccw_range(from, last, page + len);
5603 printk(KERN_ERR "%s", page);
5605 free_page((unsigned long) page);
5610 * Print sense data from a tcw.
5612 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
5613 struct dasd_ccw_req *req, struct irb *irb)
5616 int len, sl, sct, residual;
5620 page = (char *) get_zeroed_page(GFP_ATOMIC);
5622 DBF_DEV_EVENT(DBF_WARNING, device, " %s",
5623 "No memory to dump sense data");
5626 /* dump the sense data */
5627 len = sprintf(page, PRINTK_HEADER
5628 " I/O status report for device %s:\n",
5629 dev_name(&device->cdev->dev));
5630 len += sprintf(page + len, PRINTK_HEADER
5631 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5632 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
5633 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5634 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5635 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5637 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
5638 req ? req->intrc : 0);
5639 len += sprintf(page + len, PRINTK_HEADER
5640 " device %s: Failing TCW: %p\n",
5641 dev_name(&device->cdev->dev),
5642 (void *) (addr_t) irb->scsw.tm.tcw);
5646 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
5648 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
5651 len += sprintf(page + len, PRINTK_HEADER
5652 " tsb->length %d\n", tsb->length);
5653 len += sprintf(page + len, PRINTK_HEADER
5654 " tsb->flags %x\n", tsb->flags);
5655 len += sprintf(page + len, PRINTK_HEADER
5656 " tsb->dcw_offset %d\n", tsb->dcw_offset);
5657 len += sprintf(page + len, PRINTK_HEADER
5658 " tsb->count %d\n", tsb->count);
5659 residual = tsb->count - 28;
5660 len += sprintf(page + len, PRINTK_HEADER
5661 " residual %d\n", residual);
5663 switch (tsb->flags & 0x07) {
5664 case 1: /* tsa_iostat */
5665 len += sprintf(page + len, PRINTK_HEADER
5666 " tsb->tsa.iostat.dev_time %d\n",
5667 tsb->tsa.iostat.dev_time);
5668 len += sprintf(page + len, PRINTK_HEADER
5669 " tsb->tsa.iostat.def_time %d\n",
5670 tsb->tsa.iostat.def_time);
5671 len += sprintf(page + len, PRINTK_HEADER
5672 " tsb->tsa.iostat.queue_time %d\n",
5673 tsb->tsa.iostat.queue_time);
5674 len += sprintf(page + len, PRINTK_HEADER
5675 " tsb->tsa.iostat.dev_busy_time %d\n",
5676 tsb->tsa.iostat.dev_busy_time);
5677 len += sprintf(page + len, PRINTK_HEADER
5678 " tsb->tsa.iostat.dev_act_time %d\n",
5679 tsb->tsa.iostat.dev_act_time);
5680 sense = tsb->tsa.iostat.sense;
5682 case 2: /* ts_ddpc */
5683 len += sprintf(page + len, PRINTK_HEADER
5684 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
5685 for (sl = 0; sl < 2; sl++) {
5686 len += sprintf(page + len, PRINTK_HEADER
5687 " tsb->tsa.ddpc.rcq %2d-%2d: ",
5688 (8 * sl), ((8 * sl) + 7));
5689 rcq = tsb->tsa.ddpc.rcq;
5690 for (sct = 0; sct < 8; sct++) {
5691 len += sprintf(page + len, " %02x",
5694 len += sprintf(page + len, "\n");
5696 sense = tsb->tsa.ddpc.sense;
5698 case 3: /* tsa_intrg */
5699 len += sprintf(page + len, PRINTK_HEADER
5700 " tsb->tsa.intrg.: not supported yet\n");
5705 for (sl = 0; sl < 4; sl++) {
5706 len += sprintf(page + len, PRINTK_HEADER
5707 " Sense(hex) %2d-%2d:",
5708 (8 * sl), ((8 * sl) + 7));
5709 for (sct = 0; sct < 8; sct++) {
5710 len += sprintf(page + len, " %02x",
5711 sense[8 * sl + sct]);
5713 len += sprintf(page + len, "\n");
5716 if (sense[27] & DASD_SENSE_BIT_0) {
5717 /* 24 Byte Sense Data */
5718 sprintf(page + len, PRINTK_HEADER
5719 " 24 Byte: %x MSG %x, "
5720 "%s MSGb to SYSOP\n",
5721 sense[7] >> 4, sense[7] & 0x0f,
5722 sense[1] & 0x10 ? "" : "no");
5724 /* 32 Byte Sense Data */
5725 sprintf(page + len, PRINTK_HEADER
5726 " 32 Byte: Format: %x "
5727 "Exception class %x\n",
5728 sense[6] & 0x0f, sense[22] >> 4);
5731 sprintf(page + len, PRINTK_HEADER
5732 " SORRY - NO VALID SENSE AVAILABLE\n");
5735 sprintf(page + len, PRINTK_HEADER
5736 " SORRY - NO TSB DATA AVAILABLE\n");
5738 printk(KERN_ERR "%s", page);
5739 free_page((unsigned long) page);
5742 static void dasd_eckd_dump_sense(struct dasd_device *device,
5743 struct dasd_ccw_req *req, struct irb *irb)
5745 u8 *sense = dasd_get_sense(irb);
5747 if (scsw_is_tm(&irb->scsw)) {
5749 * In some cases the 'File Protected' or 'Incorrect Length'
5750 * error might be expected and log messages shouldn't be written
5751 * then. Check if the corresponding suppress bit is set.
5753 if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
5754 test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
5756 if (scsw_cstat(&irb->scsw) == 0x40 &&
5757 test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5760 dasd_eckd_dump_sense_tcw(device, req, irb);
5763 * In some cases the 'Command Reject' or 'No Record Found'
5764 * error might be expected and log messages shouldn't be
5765 * written then. Check if the corresponding suppress bit is set.
5767 if (sense && sense[0] & SNS0_CMD_REJECT &&
5768 test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5771 if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5772 test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5775 dasd_eckd_dump_sense_ccw(device, req, irb);
5779 static int dasd_eckd_reload_device(struct dasd_device *device)
5781 struct dasd_eckd_private *private = device->private;
5784 struct dasd_uid uid;
5785 unsigned long flags;
5788 * remove device from alias handling to prevent new requests
5789 * from being scheduled on the wrong alias device
5791 dasd_alias_remove_device(device);
5793 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5794 old_base = private->uid.base_unit_addr;
5795 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5797 /* Read Configuration Data */
5798 rc = dasd_eckd_read_conf(device);
5802 rc = dasd_eckd_generate_uid(device);
5806 * update unit address configuration and
5807 * add device to alias management
5809 dasd_alias_update_add_device(device);
5811 dasd_eckd_get_uid(device, &uid);
5813 if (old_base != uid.base_unit_addr) {
5814 if (strlen(uid.vduit) > 0)
5815 snprintf(print_uid, sizeof(print_uid),
5816 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
5817 uid.ssid, uid.base_unit_addr, uid.vduit);
5819 snprintf(print_uid, sizeof(print_uid),
5820 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
5821 uid.ssid, uid.base_unit_addr);
5823 dev_info(&device->cdev->dev,
5824 "An Alias device was reassigned to a new base device "
5825 "with UID: %s\n", print_uid);
5833 static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5834 struct dasd_rssd_messages *messages,
5837 struct dasd_rssd_messages *message_buf;
5838 struct dasd_psf_prssd_data *prssdp;
5839 struct dasd_ccw_req *cqr;
5843 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5844 (sizeof(struct dasd_psf_prssd_data) +
5845 sizeof(struct dasd_rssd_messages)),
5848 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5849 "Could not allocate read message buffer request");
5850 return PTR_ERR(cqr);
5855 cqr->startdev = device;
5856 cqr->memdev = device;
5858 cqr->expires = 10 * HZ;
5859 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5860 /* dasd_sleep_on_immediatly does not do complex error
5861 * recovery, so clear the erp flag and rely on a plain retry counter. */
5863 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5866 /* Prepare for Read Subsystem Data */
5867 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5868 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5869 prssdp->order = PSF_ORDER_PRSSD;
5870 prssdp->suborder = 0x03; /* Message Buffer */
5871 /* all other bytes of prssdp must be zero */
5874 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5875 ccw->count = sizeof(struct dasd_psf_prssd_data);
5876 ccw->flags |= CCW_FLAG_CC;
5877 ccw->flags |= CCW_FLAG_SLI;
5878 ccw->cda = (__u32)(addr_t) prssdp;
5880 /* Read Subsystem Data - message buffer */
5881 message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
5882 memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
5885 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5886 ccw->count = sizeof(struct dasd_rssd_messages);
5887 ccw->flags |= CCW_FLAG_SLI;
5888 ccw->cda = (__u32)(addr_t) message_buf;
5890 cqr->buildclk = get_tod_clock();
5891 cqr->status = DASD_CQR_FILLED;
5892 rc = dasd_sleep_on_immediatly(cqr);
5894 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5895 message_buf = (struct dasd_rssd_messages *)
5897 memcpy(messages, message_buf,
5898 sizeof(struct dasd_rssd_messages));
5899 } else if (cqr->lpm) {
5901 * on z/VM we might not be able to do I/O on the requested path
5902 * but instead we get the required information on any path
5903 * so retry with open path mask
5908 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5909 "Reading messages failed with rc=%d\n"
5911 dasd_sfree_request(cqr, cqr->memdev);
5915 static int dasd_eckd_query_host_access(struct dasd_device *device,
5916 struct dasd_psf_query_host_access *data)
5918 struct dasd_eckd_private *private = device->private;
5919 struct dasd_psf_query_host_access *host_access;
5920 struct dasd_psf_prssd_data *prssdp;
5921 struct dasd_ccw_req *cqr;
5925 /* not available for HYPER PAV alias devices */
5926 if (!device->block && private->lcu->pav == HYPER_PAV)
5929 /* may not be supported by the storage server */
5930 if (!(private->features.feature[14] & 0x80))
5933 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5934 sizeof(struct dasd_psf_prssd_data) + 1,
5937 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5938 "Could not allocate read message buffer request");
5939 return PTR_ERR(cqr);
5941 host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
5943 dasd_sfree_request(cqr, device);
5944 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5945 "Could not allocate host_access buffer");
5948 cqr->startdev = device;
5949 cqr->memdev = device;
5952 cqr->expires = 10 * HZ;
5954 /* Prepare for Read Subsystem Data */
5955 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5956 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5957 prssdp->order = PSF_ORDER_PRSSD;
5958 prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
5959 /* LSS and Volume that will be queried */
5960 prssdp->lss = private->ned->ID;
5961 prssdp->volume = private->ned->unit_addr;
5962 /* all other bytes of prssdp must be zero */
5965 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5966 ccw->count = sizeof(struct dasd_psf_prssd_data);
5967 ccw->flags |= CCW_FLAG_CC;
5968 ccw->flags |= CCW_FLAG_SLI;
5969 ccw->cda = (__u32)(addr_t) prssdp;
5971 /* Read Subsystem Data - query host access */
5973 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5974 ccw->count = sizeof(struct dasd_psf_query_host_access);
5975 ccw->flags |= CCW_FLAG_SLI;
5976 ccw->cda = (__u32)(addr_t) host_access;
5978 cqr->buildclk = get_tod_clock();
5979 cqr->status = DASD_CQR_FILLED;
5980 /* the command might not be supported, suppress error message */
5981 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
5982 rc = dasd_sleep_on_interruptible(cqr);
5984 *data = *host_access;
5986 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5987 "Reading host access data failed with rc=%d\n",
5992 dasd_sfree_request(cqr, cqr->memdev);
5997 * return number of grouped devices
5999 static int dasd_eckd_host_access_count(struct dasd_device *device)
6001 struct dasd_psf_query_host_access *access;
6002 struct dasd_ckd_path_group_entry *entry;
6003 struct dasd_ckd_host_information *info;
6007 access = kzalloc(sizeof(*access), GFP_NOIO);
6009 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6010 "Could not allocate access buffer");
6013 rc = dasd_eckd_query_host_access(device, access);
6019 info = (struct dasd_ckd_host_information *)
6020 access->host_access_information;
6021 for (i = 0; i < info->entry_count; i++) {
6022 entry = (struct dasd_ckd_path_group_entry *)
6023 (info->entry + i * info->entry_size);
6024 if (entry->status_flags & DASD_ECKD_PG_GROUPED)
6033 * write host access information to a sequential file
6035 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
6037 struct dasd_psf_query_host_access *access;
6038 struct dasd_ckd_path_group_entry *entry;
6039 struct dasd_ckd_host_information *info;
6040 char sysplex[9] = "";
6043 access = kzalloc(sizeof(*access), GFP_NOIO);
6045 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6046 "Could not allocate access buffer");
6049 rc = dasd_eckd_query_host_access(device, access);
6055 info = (struct dasd_ckd_host_information *)
6056 access->host_access_information;
6057 for (i = 0; i < info->entry_count; i++) {
6058 entry = (struct dasd_ckd_path_group_entry *)
6059 (info->entry + i * info->entry_size);
6061 seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
6063 seq_printf(m, "status_flags %02x\n", entry->status_flags);
6065 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
6066 EBCASC(sysplex, sizeof(sysplex));
6067 seq_printf(m, "sysplex_name %8s\n", sysplex);
6068 /* SUPPORTED CYLINDER */
6069 seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
6071 seq_printf(m, "timestamp %lu\n", (unsigned long)
6080 * Perform Subsystem Function - CUIR response
6083 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
6084 __u32 message_id, __u8 lpum)
6086 struct dasd_psf_cuir_response *psf_cuir;
6087 int pos = pathmask_to_pos(lpum);
6088 struct dasd_ccw_req *cqr;
6092 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
6093 sizeof(struct dasd_psf_cuir_response),
6097 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6098 "Could not allocate PSF-CUIR request");
6099 return PTR_ERR(cqr);
6102 psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
6103 psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
6104 psf_cuir->cc = response;
6105 psf_cuir->chpid = device->path[pos].chpid;
6106 psf_cuir->message_id = message_id;
6107 psf_cuir->cssid = device->path[pos].cssid;
6108 psf_cuir->ssid = device->path[pos].ssid;
6110 ccw->cmd_code = DASD_ECKD_CCW_PSF;
6111 ccw->cda = (__u32)(addr_t)psf_cuir;
6112 ccw->flags = CCW_FLAG_SLI;
6113 ccw->count = sizeof(struct dasd_psf_cuir_response);
6115 cqr->startdev = device;
6116 cqr->memdev = device;
6119 cqr->expires = 10*HZ;
6120 cqr->buildclk = get_tod_clock();
6121 cqr->status = DASD_CQR_FILLED;
6122 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6124 rc = dasd_sleep_on(cqr);
6126 dasd_sfree_request(cqr, cqr->memdev);
6131 * return the configuration data that is referenced by the record selector
6132 * if a record selector is specified; otherwise return the
6133 * conf_data pointer for the path specified by lpum
6135 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6137 struct dasd_cuir_message *cuir)
6139 struct dasd_conf_data *conf_data;
6142 if (cuir->record_selector == 0)
6144 for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6145 conf_data = device->path[pos].conf_data;
6146 if (conf_data->gneq.record_selector ==
6147 cuir->record_selector)
6151 return device->path[pathmask_to_pos(lpum)].conf_data;
6155 * This function determines the scope of a reconfiguration request by
6156 * analysing the path and device selection data provided in the CUIR request.
6157 * Returns a path mask containing the CUIR-affected paths for the given device.
6159 * If the CUIR request does not contain the required information, return the
6160 * path mask of the path the attention message for the CUIR request was received on.
6163 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
6164 struct dasd_cuir_message *cuir)
6166 struct dasd_conf_data *ref_conf_data;
6167 unsigned long bitmask = 0, mask = 0;
6168 struct dasd_conf_data *conf_data;
6169 unsigned int pos, path;
6170 char *ref_gneq, *gneq;
6171 char *ref_ned, *ned;
6174 /* if the CUIR request does not specify the scope, use the path
6175 the attention message was presented on */
6176 if (!cuir->ned_map ||
6177 !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
6180 /* get reference conf data */
6181 ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
6182 /* reference ned is determined by ned_map field */
6183 pos = 8 - ffs(cuir->ned_map);
6184 ref_ned = (char *)&ref_conf_data->neds[pos];
6185 ref_gneq = (char *)&ref_conf_data->gneq;
6186 /* transfer 24 bit neq_map to mask */
6187 mask = cuir->neq_map[2];
6188 mask |= cuir->neq_map[1] << 8;
6189 mask |= cuir->neq_map[0] << 16;
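/* e.g. neq_map = {0x01, 0x02, 0x03} yields mask = 0x010203 */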
6191 for (path = 0; path < 8; path++) {
6192 /* initialise data per path */
6194 conf_data = device->path[path].conf_data;
6195 pos = 8 - ffs(cuir->ned_map);
6196 ned = (char *) &conf_data->neds[pos];
6197 /* compare reference ned and per path ned */
6198 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
6200 gneq = (char *)&conf_data->gneq;
6201 /* compare reference gneq and per-path gneq under a
6202 24 bit mask where the least significant mask bit
6203 corresponds to gneq byte 31 and bit 23 to byte 8 */
6205 pos = ffs(bitmask) - 1;
6206 if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
6209 clear_bit(pos, &bitmask);
6213 /* device and path match the reference values
6214 add path to CUIR scope */
6215 tbcpm |= 0x80 >> path;
6220 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6221 unsigned long paths, int action)
6226 /* get position of bit in mask */
6227 pos = 8 - ffs(paths);
6228 /* get channel path descriptor from this position */
6229 if (action == CUIR_QUIESCE)
6230 pr_warn("Service on the storage server caused path %x.%02x to go offline",
6231 device->path[pos].cssid,
6232 device->path[pos].chpid);
6233 else if (action == CUIR_RESUME)
6234 pr_info("Path %x.%02x is back online after service on the storage server",
6235 device->path[pos].cssid,
6236 device->path[pos].chpid);
6237 clear_bit(7 - pos, &paths);
6241 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
6242 struct dasd_cuir_message *cuir)
6244 unsigned long tbcpm;
6246 tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
6247 /* nothing to do if path is not in use */
6248 if (!(dasd_path_get_opm(device) & tbcpm))
6250 if (!(dasd_path_get_opm(device) & ~tbcpm)) {
6251 /* no path would be left if the CUIR action is taken, return error */
6255 /* remove device from operational path mask */
6256 dasd_path_remove_opm(device, tbcpm);
6257 dasd_path_add_cuirpm(device, tbcpm);
6262 * walk through all devices and build a path mask to quiesce them
6263 * return an error if the last path to a device would be removed
6265 * if only part of the devices are quiesced and an error
6266 * occurs no onlining necessary, the storage server will
6267 * notify the already set offline devices again
6269 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
6270 struct dasd_cuir_message *cuir)
6272 struct dasd_eckd_private *private = device->private;
6273 struct alias_pav_group *pavgroup, *tempgroup;
6274 struct dasd_device *dev, *n;
6275 unsigned long paths = 0;
6276 unsigned long flags;
6279 /* active devices */
6280 list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6282 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6283 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6284 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6289 /* inactive devices */
6290 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6292 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6293 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6294 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6299 /* devices in PAV groups */
6300 list_for_each_entry_safe(pavgroup, tempgroup,
6301 &private->lcu->grouplist, group) {
6302 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6304 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6305 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6306 spin_unlock_irqrestore(
6307 get_ccwdev_lock(dev->cdev), flags);
6312 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6314 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6315 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6316 spin_unlock_irqrestore(
6317 get_ccwdev_lock(dev->cdev), flags);
6323 /* notify user about all paths affected by CUIR action */
6324 dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
6330 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
6331 struct dasd_cuir_message *cuir)
6333 struct dasd_eckd_private *private = device->private;
6334 struct alias_pav_group *pavgroup, *tempgroup;
6335 struct dasd_device *dev, *n;
6336 unsigned long paths = 0;
6340 * the path may have been added through a generic path event before;
6341 * only trigger path verification if the path is not already in use
6343 list_for_each_entry_safe(dev, n,
6344 &private->lcu->active_devices,
6346 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6348 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6349 dasd_path_add_tbvpm(dev, tbcpm);
6350 dasd_schedule_device_bh(dev);
6353 list_for_each_entry_safe(dev, n,
6354 &private->lcu->inactive_devices,
6356 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6358 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6359 dasd_path_add_tbvpm(dev, tbcpm);
6360 dasd_schedule_device_bh(dev);
6363 /* devices in PAV groups */
6364 list_for_each_entry_safe(pavgroup, tempgroup,
6365 &private->lcu->grouplist,
6367 list_for_each_entry_safe(dev, n,
6368 &pavgroup->baselist,
6370 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6372 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6373 dasd_path_add_tbvpm(dev, tbcpm);
6374 dasd_schedule_device_bh(dev);
6377 list_for_each_entry_safe(dev, n,
6378 &pavgroup->aliaslist,
6380 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6382 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6383 dasd_path_add_tbvpm(dev, tbcpm);
6384 dasd_schedule_device_bh(dev);
6388 /* notify user about all paths affected by CUIR action */
6389 dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
6393 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
6396 struct dasd_cuir_message *cuir = messages;
6399 DBF_DEV_EVENT(DBF_WARNING, device,
6400 "CUIR request: %016llx %016llx %016llx %08x",
6401 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
6404 if (cuir->code == CUIR_QUIESCE) {
6406 if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
6407 response = PSF_CUIR_LAST_PATH;
6409 response = PSF_CUIR_COMPLETED;
6410 } else if (cuir->code == CUIR_RESUME) {
6412 dasd_eckd_cuir_resume(device, lpum, cuir);
6413 response = PSF_CUIR_COMPLETED;
6415 response = PSF_CUIR_NOT_SUPPORTED;
6417 dasd_eckd_psf_cuir_response(device, response,
6418 cuir->message_id, lpum);
6419 DBF_DEV_EVENT(DBF_WARNING, device,
6420 "CUIR response: %d on message ID %08x", response,
6422 /* to make sure there is no attention left, schedule the work again */
6423 device->discipline->check_attention(device, lpum);
6426 static void dasd_eckd_oos_resume(struct dasd_device *device)
6428 struct dasd_eckd_private *private = device->private;
6429 struct alias_pav_group *pavgroup, *tempgroup;
6430 struct dasd_device *dev, *n;
6431 unsigned long flags;
6433 spin_lock_irqsave(&private->lcu->lock, flags);
6434 list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6436 if (dev->stopped & DASD_STOPPED_NOSPC)
6437 dasd_generic_space_avail(dev);
6439 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6441 if (dev->stopped & DASD_STOPPED_NOSPC)
6442 dasd_generic_space_avail(dev);
6444 /* devices in PAV groups */
6445 list_for_each_entry_safe(pavgroup, tempgroup,
6446 &private->lcu->grouplist,
6448 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6450 if (dev->stopped & DASD_STOPPED_NOSPC)
6451 dasd_generic_space_avail(dev);
6453 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6455 if (dev->stopped & DASD_STOPPED_NOSPC)
6456 dasd_generic_space_avail(dev);
6459 spin_unlock_irqrestore(&private->lcu->lock, flags);
6462 static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
6465 struct dasd_oos_message *oos = messages;
6467 switch (oos->code) {
6470 dev_warn(&device->cdev->dev,
6471 "Extent pool usage has reached a critical value\n");
6472 dasd_eckd_oos_resume(device);
6476 dev_warn(&device->cdev->dev,
6477 "Extent pool is exhausted\n");
6481 dev_info(&device->cdev->dev,
6482 "Extent pool physical space constraint has been relieved\n");
6486 /* In any case, update related data */
6487 dasd_eckd_read_ext_pool_info(device);
6489 /* to make sure there is no attention left, schedule the work again */
6490 device->discipline->check_attention(device, lpum);
6493 static void dasd_eckd_check_attention_work(struct work_struct *work)
6495 struct check_attention_work_data *data;
6496 struct dasd_rssd_messages *messages;
6497 struct dasd_device *device;
6500 data = container_of(work, struct check_attention_work_data, worker);
6501 device = data->device;
6502 messages = kzalloc(sizeof(*messages), GFP_KERNEL);
6504 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6505 "Could not allocate attention message buffer");
6508 rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
6512 if (messages->length == ATTENTION_LENGTH_CUIR &&
6513 messages->format == ATTENTION_FORMAT_CUIR)
6514 dasd_eckd_handle_cuir(device, messages, data->lpum);
6515 if (messages->length == ATTENTION_LENGTH_OOS &&
6516 messages->format == ATTENTION_FORMAT_OOS)
6517 dasd_eckd_handle_oos(device, messages, data->lpum);
6520 dasd_put_device(device);
6525 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6527 struct check_attention_work_data *data;
6529 data = kzalloc(sizeof(*data), GFP_ATOMIC);
6532 INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6533 dasd_get_device(device);
6534 data->device = device;
6536 schedule_work(&data->worker);
6540 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6542 if (~lpum & dasd_path_get_opm(device)) {
6543 dasd_path_add_nohpfpm(device, lpum);
6544 dasd_path_remove_opm(device, lpum);
6545 dev_err(&device->cdev->dev,
6546 "Channel path %02X lost HPF functionality and is disabled\n",
6553 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6555 struct dasd_eckd_private *private = device->private;
6557 dev_err(&device->cdev->dev,
6558 "High Performance FICON disabled\n");
6559 private->fcx_max_data = 0;
6562 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6564 struct dasd_eckd_private *private = device->private;
6566 return private->fcx_max_data ? 1 : 0;
6569 static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
6572 struct dasd_eckd_private *private = device->private;
6574 if (!private->fcx_max_data) {
6575 /* sanity check for no HPF, the error makes no sense */
6576 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6577 "Trying to disable HPF for a non HPF device");
6580 if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
6581 dasd_eckd_disable_hpf_device(device);
6582 } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
6583 if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
6585 dasd_eckd_disable_hpf_device(device);
6586 dasd_path_set_tbvpm(device,
6587 dasd_path_get_hpfpm(device));
6590 * prevent any new I/O from being started on the device and schedule a
6591 * requeue of existing requests
6593 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
6594 dasd_schedule_requeue(device);
6598 * Initialize block layer request queue.
6600 static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
6602 unsigned int logical_block_size = block->bp_block;
6603 struct request_queue *q = block->request_queue;
6604 struct dasd_device *device = block->base;
6607 if (device->features & DASD_FEATURE_USERAW) {
6609 * the max_blocks value for raw_track access is 256;
6610 * it is higher than the native ECKD value because we
6611 * only need one ccw per track,
6612 * so the max_hw_sectors are 256 blocks of 4k each:
6613 * 2048 x 512B = 1024kB = 16 tracks
6615 max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
6617 max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
6619 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
6620 q->limits.max_dev_sectors = max;
6621 blk_queue_logical_block_size(q, logical_block_size);
6622 blk_queue_max_hw_sectors(q, max);
6623 blk_queue_max_segments(q, USHRT_MAX);
6624 /* With page sized segments each segment can be translated into one idaw/tidaw */
6625 blk_queue_max_segment_size(q, PAGE_SIZE);
6626 blk_queue_segment_boundary(q, PAGE_SIZE - 1);
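/*
 * blk_queue_segment_boundary() takes a mask: PAGE_SIZE - 1 means that no
 * segment may cross a page boundary, so each segment always fits into a
 * single idaw/tidaw.
 */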
6629 static struct ccw_driver dasd_eckd_driver = {
6631 .name = "dasd-eckd",
6632 .owner = THIS_MODULE,
6634 .ids = dasd_eckd_ids,
6635 .probe = dasd_eckd_probe,
6636 .remove = dasd_generic_remove,
6637 .set_offline = dasd_generic_set_offline,
6638 .set_online = dasd_eckd_set_online,
6639 .notify = dasd_generic_notify,
6640 .path_event = dasd_generic_path_event,
6641 .shutdown = dasd_generic_shutdown,
6642 .uc_handler = dasd_generic_uc_handler,
6643 .int_class = IRQIO_DAS,
6646 static struct dasd_discipline dasd_eckd_discipline = {
6647 .owner = THIS_MODULE,
6650 .check_device = dasd_eckd_check_characteristics,
6651 .uncheck_device = dasd_eckd_uncheck_device,
6652 .do_analysis = dasd_eckd_do_analysis,
6653 .pe_handler = dasd_eckd_pe_handler,
6654 .basic_to_ready = dasd_eckd_basic_to_ready,
6655 .online_to_ready = dasd_eckd_online_to_ready,
6656 .basic_to_known = dasd_eckd_basic_to_known,
6657 .setup_blk_queue = dasd_eckd_setup_blk_queue,
6658 .fill_geometry = dasd_eckd_fill_geometry,
6659 .start_IO = dasd_start_IO,
6660 .term_IO = dasd_term_IO,
6661 .handle_terminated_request = dasd_eckd_handle_terminated_request,
6662 .format_device = dasd_eckd_format_device,
6663 .check_device_format = dasd_eckd_check_device_format,
6664 .erp_action = dasd_eckd_erp_action,
6665 .erp_postaction = dasd_eckd_erp_postaction,
6666 .check_for_device_change = dasd_eckd_check_for_device_change,
6667 .build_cp = dasd_eckd_build_alias_cp,
6668 .free_cp = dasd_eckd_free_alias_cp,
6669 .dump_sense = dasd_eckd_dump_sense,
6670 .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
6671 .fill_info = dasd_eckd_fill_info,
6672 .ioctl = dasd_eckd_ioctl,
6673 .reload = dasd_eckd_reload_device,
6674 .get_uid = dasd_eckd_get_uid,
6675 .kick_validate = dasd_eckd_kick_validate_server,
6676 .check_attention = dasd_eckd_check_attention,
6677 .host_access_count = dasd_eckd_host_access_count,
6678 .hosts_print = dasd_hosts_print,
6679 .handle_hpf_error = dasd_eckd_handle_hpf_error,
6680 .disable_hpf = dasd_eckd_disable_hpf_device,
6681 .hpf_enabled = dasd_eckd_hpf_enabled,
6682 .reset_path = dasd_eckd_reset_path,
6683 .is_ese = dasd_eckd_is_ese,
6684 .space_allocated = dasd_eckd_space_allocated,
6685 .space_configured = dasd_eckd_space_configured,
6686 .logical_capacity = dasd_eckd_logical_capacity,
6687 .release_space = dasd_eckd_release_space,
6688 .ext_pool_id = dasd_eckd_ext_pool_id,
6689 .ext_size = dasd_eckd_ext_size,
6690 .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
6691 .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
6692 .ext_pool_oos = dasd_eckd_ext_pool_oos,
6693 .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
6694 .ese_format = dasd_eckd_ese_format,
6695 .ese_read = dasd_eckd_ese_read,
6699 dasd_eckd_init(void)
6703 ASCEBC(dasd_eckd_discipline.ebcname, 4);
6704 dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6705 GFP_KERNEL | GFP_DMA);
6706 if (!dasd_reserve_req)
6708 dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6709 GFP_KERNEL | GFP_DMA);
6710 if (!dasd_vol_info_req)
6712 pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
6713 GFP_KERNEL | GFP_DMA);
6714 if (!pe_handler_worker) {
6715 kfree(dasd_reserve_req);
6716 kfree(dasd_vol_info_req);
6719 rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6721 kfree(pe_handler_worker);
6722 kfree(dasd_reserve_req);
6723 kfree(dasd_vol_info_req);
6726 ret = ccw_driver_register(&dasd_eckd_driver);
6728 wait_for_device_probe();
6730 kfree(pe_handler_worker);
6731 kfree(dasd_reserve_req);
6732 kfree(dasd_vol_info_req);
6733 free_page((unsigned long)rawpadpage);
6739 dasd_eckd_cleanup(void)
6741 ccw_driver_unregister(&dasd_eckd_driver);
6742 kfree(pe_handler_worker);
6743 kfree(dasd_reserve_req);
6744 free_page((unsigned long)rawpadpage);
6747 module_init(dasd_eckd_init);
6748 module_exit(dasd_eckd_cleanup);