1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * Copyright IBM Corp. 1999, 2009
9 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11 */
12
13 #include <linux/stddef.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/hdreg.h> /* HDIO_GETGEO */
17 #include <linux/bio.h>
18 #include <linux/module.h>
19 #include <linux/compat.h>
20 #include <linux/init.h>
21 #include <linux/seq_file.h>
22 #include <linux/uaccess.h>
23 #include <linux/io.h>
24
25 #include <asm/css_chars.h>
26 #include <asm/machine.h>
27 #include <asm/debug.h>
28 #include <asm/idals.h>
29 #include <asm/ebcdic.h>
30 #include <asm/cio.h>
31 #include <asm/ccwdev.h>
32 #include <asm/itcw.h>
33 #include <asm/schid.h>
34 #include <asm/chpid.h>
35
36 #include "dasd_int.h"
37 #include "dasd_eckd.h"
38
39 /*
40 * raw track access always maps to 64k in memory,
41 * i.e. 16 blocks of 4k per track
42 */
43 #define DASD_RAW_BLOCK_PER_TRACK 16
44 #define DASD_RAW_BLOCKSIZE 4096
45 /* 64k are 128 x 512 byte sectors */
46 #define DASD_RAW_SECTORS_PER_TRACK 128
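/*
 * Worked example (added for illustration, not in the original source):
 * DASD_RAW_BLOCK_PER_TRACK * DASD_RAW_BLOCKSIZE = 16 * 4096 = 64k per
 * raw track buffer, which is the same 65536 bytes as
 * DASD_RAW_SECTORS_PER_TRACK * 512.
 */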
47
48 MODULE_DESCRIPTION("S/390 DASD ECKD Disks device driver");
49 MODULE_LICENSE("GPL");
50
51 static struct dasd_discipline dasd_eckd_discipline;
52
53 /* The ccw bus type uses this table to find devices that it sends to
54 * dasd_eckd_probe */
55 static struct ccw_device_id dasd_eckd_ids[] = {
56 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
57 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
58 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
59 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
60 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
61 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
62 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
63 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
64 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
65 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
66 { /* end of list */ },
67 };
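/*
 * Each table entry matches on control-unit type/model and device
 * type/model; the first entry above, for example, matches a 3390
 * device behind a 3990 storage control.
 */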
68
69 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
70
71 static struct ccw_driver dasd_eckd_driver; /* see below */
72
73 static void *rawpadpage;
74
75 #define INIT_CQR_OK 0
76 #define INIT_CQR_UNFORMATTED 1
77 #define INIT_CQR_ERROR 2
78
79 /* emergency request for reserve/release */
80 static struct {
81 struct dasd_ccw_req cqr;
82 struct ccw1 ccw;
83 char data[32];
84 } *dasd_reserve_req;
85 static DEFINE_MUTEX(dasd_reserve_mutex);
86
87 static struct {
88 struct dasd_ccw_req cqr;
89 struct ccw1 ccw[2];
90 char data[40];
91 } *dasd_vol_info_req;
92 static DEFINE_MUTEX(dasd_vol_info_mutex);
93
94 struct ext_pool_exhaust_work_data {
95 struct work_struct worker;
96 struct dasd_device *device;
97 struct dasd_device *base;
98 };
99
100 /* definitions for the path verification worker */
101 struct pe_handler_work_data {
102 struct work_struct worker;
103 struct dasd_device *device;
104 struct dasd_ccw_req cqr;
105 struct ccw1 ccw;
106 __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
107 int isglobal;
108 __u8 tbvpm;
109 __u8 fcsecpm;
110 };
111 static struct pe_handler_work_data *pe_handler_worker;
112 static DEFINE_MUTEX(dasd_pe_handler_mutex);
113
114 struct check_attention_work_data {
115 struct work_struct worker;
116 struct dasd_device *device;
117 __u8 lpum;
118 };
119
120 static int dasd_eckd_ext_pool_id(struct dasd_device *);
121 static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
122 struct dasd_device *, struct dasd_device *,
123 unsigned int, int, unsigned int, unsigned int,
124 unsigned int, unsigned int);
125 static int dasd_eckd_query_pprc_status(struct dasd_device *,
126 struct dasd_pprc_data_sc4 *);
127
128 /* initial attempt at a probe function. this can be simplified once
129 * the other detection code is gone */
130 static int
131 dasd_eckd_probe (struct ccw_device *cdev)
132 {
133 int ret;
134
135 /* set ECKD specific ccw-device options */
136 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
137 CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
138 if (ret) {
139 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
140 "dasd_eckd_probe: could not set "
141 "ccw-device options");
142 return ret;
143 }
144 ret = dasd_generic_probe(cdev);
145 return ret;
146 }
147
148 static int
149 dasd_eckd_set_online(struct ccw_device *cdev)
150 {
151 return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
152 }
153
154 static const int sizes_trk0[] = { 28, 148, 84 };
155 #define LABEL_SIZE 140
156
157 /* head and record addresses of count_area read in analysis ccw */
158 static const int count_area_head[] = { 0, 0, 0, 0, 1 };
159 static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
160
161 static inline unsigned int
162 ceil_quot(unsigned int d1, unsigned int d2)
163 {
164 return (d1 + (d2 - 1)) / d2;
165 }
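/*
 * ceil_quot() is plain round-up integer division; with illustrative
 * values, ceil_quot(10, 4) == 3 and ceil_quot(8, 4) == 2.
 */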
166
167 static unsigned int
168 recs_per_track(struct dasd_eckd_characteristics * rdc,
169 unsigned int kl, unsigned int dl)
170 {
171 int dn, kn;
172
173 switch (rdc->dev_type) {
174 case 0x3380:
175 if (kl)
176 return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
177 ceil_quot(dl + 12, 32));
178 else
179 return 1499 / (15 + ceil_quot(dl + 12, 32));
180 case 0x3390:
181 dn = ceil_quot(dl + 6, 232) + 1;
182 if (kl) {
183 kn = ceil_quot(kl + 6, 232) + 1;
184 return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
185 9 + ceil_quot(dl + 6 * dn, 34));
186 } else
187 return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
188 case 0x9345:
189 dn = ceil_quot(dl + 6, 232) + 1;
190 if (kl) {
191 kn = ceil_quot(kl + 6, 232) + 1;
192 return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
193 ceil_quot(dl + 6 * dn, 34));
194 } else
195 return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
196 }
197 return 0;
198 }
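/*
 * Illustrative check of the 3390 case above for kl = 0, dl = 4096:
 * dn = ceil_quot(4102, 232) + 1 = 19, so the result is
 * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12,
 * i.e. twelve 4k records per track.
 */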
199
200 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
201 {
202 geo->cyl = (__u16) cyl;
203 geo->head = cyl >> 16;
204 geo->head <<= 4;
205 geo->head |= head;
206 }
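/*
 * set_ch_t() packs a 28-bit cylinder address: the low 16 cylinder bits
 * go into geo->cyl and bits 16-27 into the upper bits of geo->head.
 * With illustrative values cyl = 0x12345 and head = 7 this yields
 * geo->cyl = 0x2345 and geo->head = 0x17.
 */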
207
208 /*
209 * calculate the failing track from sense data, depending on whether
210 * it is an EAV device or not
211 */
212 static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
213 sector_t *track)
214 {
215 struct dasd_eckd_private *private = device->private;
216 u8 *sense = NULL;
217 u32 cyl;
218 u8 head;
219
220 sense = dasd_get_sense(irb);
221 if (!sense) {
222 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
223 "ESE error no sense data\n");
224 return -EINVAL;
225 }
226 if (!(sense[27] & DASD_SENSE_BIT_2)) {
227 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
228 "ESE error no valid track data\n");
229 return -EINVAL;
230 }
231
232 if (sense[27] & DASD_SENSE_BIT_3) {
233 /* enhanced addressing */
234 cyl = sense[30] << 20;
235 cyl |= (sense[31] & 0xF0) << 12;
236 cyl |= sense[28] << 8;
237 cyl |= sense[29];
238 } else {
239 cyl = sense[29] << 8;
240 cyl |= sense[30];
241 }
242 head = sense[31] & 0x0F;
243 *track = cyl * private->rdc_data.trk_per_cyl + head;
244 return 0;
245 }
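/*
 * Example decode with made-up sense bytes: if DASD_SENSE_BIT_3 is set
 * and sense[28..31] = 0x01 0x02 0x03 0x45, then
 * cyl = (0x03 << 20) | ((0x45 & 0xF0) << 12) | (0x01 << 8) | 0x02
 *     = 0x340102 and head = 0x45 & 0x0F = 5.
 */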
246
247 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
248 struct dasd_device *device)
249 {
250 struct dasd_eckd_private *private = device->private;
251 int rc;
252
253 rc = get_phys_clock(&data->ep_sys_time);
254 /*
255 * Ignore return code if XRC is not supported or
256 * sync clock is switched off
257 */
258 if ((rc && !private->rdc_data.facilities.XRC_supported) ||
259 rc == -EOPNOTSUPP || rc == -EACCES)
260 return 0;
261
262 /* switch on System Time Stamp - needed for XRC Support */
263 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
264 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
265
266 if (ccw) {
267 ccw->count = sizeof(struct DE_eckd_data);
268 ccw->flags |= CCW_FLAG_SLI;
269 }
270
271 return rc;
272 }
273
274 static int
275 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
276 unsigned int totrk, int cmd, struct dasd_device *device,
277 int blksize)
278 {
279 struct dasd_eckd_private *private = device->private;
280 u16 heads, beghead, endhead;
281 u32 begcyl, endcyl;
282 int rc = 0;
283
284 if (ccw) {
285 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
286 ccw->flags = 0;
287 ccw->count = 16;
288 ccw->cda = virt_to_dma32(data);
289 }
290
291 memset(data, 0, sizeof(struct DE_eckd_data));
292 switch (cmd) {
293 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
294 case DASD_ECKD_CCW_READ_RECORD_ZERO:
295 case DASD_ECKD_CCW_READ:
296 case DASD_ECKD_CCW_READ_MT:
297 case DASD_ECKD_CCW_READ_CKD:
298 case DASD_ECKD_CCW_READ_CKD_MT:
299 case DASD_ECKD_CCW_READ_KD:
300 case DASD_ECKD_CCW_READ_KD_MT:
301 data->mask.perm = 0x1;
302 data->attributes.operation = private->attrib.operation;
303 break;
304 case DASD_ECKD_CCW_READ_COUNT:
305 data->mask.perm = 0x1;
306 data->attributes.operation = DASD_BYPASS_CACHE;
307 break;
308 case DASD_ECKD_CCW_READ_TRACK:
309 case DASD_ECKD_CCW_READ_TRACK_DATA:
310 data->mask.perm = 0x1;
311 data->attributes.operation = private->attrib.operation;
312 data->blk_size = 0;
313 break;
314 case DASD_ECKD_CCW_WRITE:
315 case DASD_ECKD_CCW_WRITE_MT:
316 case DASD_ECKD_CCW_WRITE_KD:
317 case DASD_ECKD_CCW_WRITE_KD_MT:
318 data->mask.perm = 0x02;
319 data->attributes.operation = private->attrib.operation;
320 rc = set_timestamp(ccw, data, device);
321 break;
322 case DASD_ECKD_CCW_WRITE_CKD:
323 case DASD_ECKD_CCW_WRITE_CKD_MT:
324 data->attributes.operation = DASD_BYPASS_CACHE;
325 rc = set_timestamp(ccw, data, device);
326 break;
327 case DASD_ECKD_CCW_ERASE:
328 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
329 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
330 data->mask.perm = 0x3;
331 data->mask.auth = 0x1;
332 data->attributes.operation = DASD_BYPASS_CACHE;
333 rc = set_timestamp(ccw, data, device);
334 break;
335 case DASD_ECKD_CCW_WRITE_FULL_TRACK:
336 data->mask.perm = 0x03;
337 data->attributes.operation = private->attrib.operation;
338 data->blk_size = 0;
339 break;
340 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
341 data->mask.perm = 0x02;
342 data->attributes.operation = private->attrib.operation;
343 data->blk_size = blksize;
344 rc = set_timestamp(ccw, data, device);
345 break;
346 default:
347 dev_err(&device->cdev->dev,
348 "0x%x is not a known command\n", cmd);
349 break;
350 }
351
352 data->attributes.mode = 0x3; /* ECKD */
353
354 if ((private->rdc_data.cu_type == 0x2105 ||
355 private->rdc_data.cu_type == 0x2107 ||
356 private->rdc_data.cu_type == 0x1750)
357 && !(private->uses_cdl && trk < 2))
358 data->ga_extended |= 0x40; /* Regular Data Format Mode */
359
360 heads = private->rdc_data.trk_per_cyl;
361 begcyl = trk / heads;
362 beghead = trk % heads;
363 endcyl = totrk / heads;
364 endhead = totrk % heads;
365
366 /* check for sequential prestage - enhance cylinder range */
367 if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
368 data->attributes.operation == DASD_SEQ_ACCESS) {
369
370 if (endcyl + private->attrib.nr_cyl < private->real_cyl)
371 endcyl += private->attrib.nr_cyl;
372 else
373 endcyl = (private->real_cyl - 1);
374 }
375
376 set_ch_t(&data->beg_ext, begcyl, beghead);
377 set_ch_t(&data->end_ext, endcyl, endhead);
378 return rc;
379 }
380
381
382 static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
383 unsigned int trk, unsigned int rec_on_trk,
384 int count, int cmd, struct dasd_device *device,
385 unsigned int reclen, unsigned int tlf)
386 {
387 struct dasd_eckd_private *private = device->private;
388 int sector;
389 int dn, d;
390
391 if (ccw) {
392 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
393 ccw->flags = 0;
394 if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
395 ccw->count = 22;
396 else
397 ccw->count = 20;
398 ccw->cda = virt_to_dma32(data);
399 }
400
401 memset(data, 0, sizeof(*data));
402 sector = 0;
403 if (rec_on_trk) {
404 switch (private->rdc_data.dev_type) {
405 case 0x3390:
406 dn = ceil_quot(reclen + 6, 232);
407 d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
408 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
409 break;
410 case 0x3380:
411 d = 7 + ceil_quot(reclen + 12, 32);
412 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
413 break;
414 }
415 }
416 data->sector = sector;
417 /* note: the meaning of count depends on the operation:
418 * for record-based I/O it is the number of records, but for
419 * track-based I/O it is the number of tracks
420 */
421 data->count = count;
422 switch (cmd) {
423 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
424 data->operation.orientation = 0x3;
425 data->operation.operation = 0x03;
426 break;
427 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
428 data->operation.orientation = 0x3;
429 data->operation.operation = 0x16;
430 break;
431 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
432 data->operation.orientation = 0x1;
433 data->operation.operation = 0x03;
434 data->count++;
435 break;
436 case DASD_ECKD_CCW_READ_RECORD_ZERO:
437 data->operation.orientation = 0x3;
438 data->operation.operation = 0x16;
439 data->count++;
440 break;
441 case DASD_ECKD_CCW_WRITE:
442 case DASD_ECKD_CCW_WRITE_MT:
443 case DASD_ECKD_CCW_WRITE_KD:
444 case DASD_ECKD_CCW_WRITE_KD_MT:
445 data->auxiliary.length_valid = 0x1;
446 data->length = reclen;
447 data->operation.operation = 0x01;
448 break;
449 case DASD_ECKD_CCW_WRITE_CKD:
450 case DASD_ECKD_CCW_WRITE_CKD_MT:
451 data->auxiliary.length_valid = 0x1;
452 data->length = reclen;
453 data->operation.operation = 0x03;
454 break;
455 case DASD_ECKD_CCW_WRITE_FULL_TRACK:
456 data->operation.orientation = 0x0;
457 data->operation.operation = 0x3F;
458 data->extended_operation = 0x11;
459 data->length = 0;
460 data->extended_parameter_length = 0x02;
461 if (data->count > 8) {
462 data->extended_parameter[0] = 0xFF;
463 data->extended_parameter[1] = 0xFF;
464 data->extended_parameter[1] <<= (16 - count);
465 } else {
466 data->extended_parameter[0] = 0xFF;
467 data->extended_parameter[0] <<= (8 - count);
468 data->extended_parameter[1] = 0x00;
469 }
470 data->sector = 0xFF;
471 break;
472 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
473 data->auxiliary.length_valid = 0x1;
474 data->length = reclen; /* not tlf, as one might think */
475 data->operation.operation = 0x3F;
476 data->extended_operation = 0x23;
477 break;
478 case DASD_ECKD_CCW_READ:
479 case DASD_ECKD_CCW_READ_MT:
480 case DASD_ECKD_CCW_READ_KD:
481 case DASD_ECKD_CCW_READ_KD_MT:
482 data->auxiliary.length_valid = 0x1;
483 data->length = reclen;
484 data->operation.operation = 0x06;
485 break;
486 case DASD_ECKD_CCW_READ_CKD:
487 case DASD_ECKD_CCW_READ_CKD_MT:
488 data->auxiliary.length_valid = 0x1;
489 data->length = reclen;
490 data->operation.operation = 0x16;
491 break;
492 case DASD_ECKD_CCW_READ_COUNT:
493 data->operation.operation = 0x06;
494 break;
495 case DASD_ECKD_CCW_READ_TRACK:
496 data->operation.orientation = 0x1;
497 data->operation.operation = 0x0C;
498 data->extended_parameter_length = 0;
499 data->sector = 0xFF;
500 break;
501 case DASD_ECKD_CCW_READ_TRACK_DATA:
502 data->auxiliary.length_valid = 0x1;
503 data->length = tlf;
504 data->operation.operation = 0x0C;
505 break;
506 case DASD_ECKD_CCW_ERASE:
507 data->length = reclen;
508 data->auxiliary.length_valid = 0x1;
509 data->operation.operation = 0x0b;
510 break;
511 default:
512 DBF_DEV_EVENT(DBF_ERR, device,
513 "fill LRE unknown opcode 0x%x", cmd);
514 BUG();
515 }
516 set_ch_t(&data->seek_addr,
517 trk / private->rdc_data.trk_per_cyl,
518 trk % private->rdc_data.trk_per_cyl);
519 data->search_arg.cyl = data->seek_addr.cyl;
520 data->search_arg.head = data->seek_addr.head;
521 data->search_arg.record = rec_on_trk;
522 }
523
524 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
525 unsigned int trk, unsigned int totrk, int cmd,
526 struct dasd_device *basedev, struct dasd_device *startdev,
527 unsigned int format, unsigned int rec_on_trk, int count,
528 unsigned int blksize, unsigned int tlf)
529 {
530 struct dasd_eckd_private *basepriv, *startpriv;
531 struct LRE_eckd_data *lredata;
532 struct DE_eckd_data *dedata;
533 int rc = 0;
534
535 basepriv = basedev->private;
536 startpriv = startdev->private;
537 dedata = &pfxdata->define_extent;
538 lredata = &pfxdata->locate_record;
539
540 ccw->cmd_code = DASD_ECKD_CCW_PFX;
541 ccw->flags = 0;
542 if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
543 ccw->count = sizeof(*pfxdata) + 2;
544 ccw->cda = virt_to_dma32(pfxdata);
545 memset(pfxdata, 0, sizeof(*pfxdata) + 2);
546 } else {
547 ccw->count = sizeof(*pfxdata);
548 ccw->cda = virt_to_dma32(pfxdata);
549 memset(pfxdata, 0, sizeof(*pfxdata));
550 }
551
552 /* prefix data */
553 if (format > 1) {
554 DBF_DEV_EVENT(DBF_ERR, basedev,
555 "PFX LRE unknown format 0x%x", format);
556 BUG();
557 return -EINVAL;
558 }
559 pfxdata->format = format;
560 pfxdata->base_address = basepriv->conf.ned->unit_addr;
561 pfxdata->base_lss = basepriv->conf.ned->ID;
562 pfxdata->validity.define_extent = 1;
563
564 /* private uid is kept up to date, conf_data may be outdated */
565 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
566 pfxdata->validity.verify_base = 1;
567
568 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
569 pfxdata->validity.verify_base = 1;
570 pfxdata->validity.hyper_pav = 1;
571 }
572
573 rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
574
575 /*
576 * For some commands the System Time Stamp is set in the define extent
577 * data when XRC is supported. The validity of the time stamp must be
578 * reflected in the prefix data as well.
579 */
580 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
581 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
582
583 if (format == 1) {
584 locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
585 basedev, blksize, tlf);
586 }
587
588 return rc;
589 }
590
591 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
592 unsigned int trk, unsigned int totrk, int cmd,
593 struct dasd_device *basedev, struct dasd_device *startdev)
594 {
595 return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
596 0, 0, 0, 0, 0);
597 }
598
599 static void
600 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
601 unsigned int rec_on_trk, int no_rec, int cmd,
602 struct dasd_device * device, int reclen)
603 {
604 struct dasd_eckd_private *private = device->private;
605 int sector;
606 int dn, d;
607
608 DBF_DEV_EVENT(DBF_INFO, device,
609 "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
610 trk, rec_on_trk, no_rec, cmd, reclen);
611
612 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
613 ccw->flags = 0;
614 ccw->count = 16;
615 ccw->cda = virt_to_dma32(data);
616
617 memset(data, 0, sizeof(struct LO_eckd_data));
618 sector = 0;
619 if (rec_on_trk) {
620 switch (private->rdc_data.dev_type) {
621 case 0x3390:
622 dn = ceil_quot(reclen + 6, 232);
623 d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
624 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
625 break;
626 case 0x3380:
627 d = 7 + ceil_quot(reclen + 12, 32);
628 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
629 break;
630 }
631 }
632 data->sector = sector;
633 data->count = no_rec;
634 switch (cmd) {
635 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
636 data->operation.orientation = 0x3;
637 data->operation.operation = 0x03;
638 break;
639 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
640 data->operation.orientation = 0x3;
641 data->operation.operation = 0x16;
642 break;
643 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
644 data->operation.orientation = 0x1;
645 data->operation.operation = 0x03;
646 data->count++;
647 break;
648 case DASD_ECKD_CCW_READ_RECORD_ZERO:
649 data->operation.orientation = 0x3;
650 data->operation.operation = 0x16;
651 data->count++;
652 break;
653 case DASD_ECKD_CCW_WRITE:
654 case DASD_ECKD_CCW_WRITE_MT:
655 case DASD_ECKD_CCW_WRITE_KD:
656 case DASD_ECKD_CCW_WRITE_KD_MT:
657 data->auxiliary.last_bytes_used = 0x1;
658 data->length = reclen;
659 data->operation.operation = 0x01;
660 break;
661 case DASD_ECKD_CCW_WRITE_CKD:
662 case DASD_ECKD_CCW_WRITE_CKD_MT:
663 data->auxiliary.last_bytes_used = 0x1;
664 data->length = reclen;
665 data->operation.operation = 0x03;
666 break;
667 case DASD_ECKD_CCW_READ:
668 case DASD_ECKD_CCW_READ_MT:
669 case DASD_ECKD_CCW_READ_KD:
670 case DASD_ECKD_CCW_READ_KD_MT:
671 data->auxiliary.last_bytes_used = 0x1;
672 data->length = reclen;
673 data->operation.operation = 0x06;
674 break;
675 case DASD_ECKD_CCW_READ_CKD:
676 case DASD_ECKD_CCW_READ_CKD_MT:
677 data->auxiliary.last_bytes_used = 0x1;
678 data->length = reclen;
679 data->operation.operation = 0x16;
680 break;
681 case DASD_ECKD_CCW_READ_COUNT:
682 data->operation.operation = 0x06;
683 break;
684 case DASD_ECKD_CCW_ERASE:
685 data->length = reclen;
686 data->auxiliary.last_bytes_used = 0x1;
687 data->operation.operation = 0x0b;
688 break;
689 default:
690 DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
691 "opcode 0x%x", cmd);
692 }
693 set_ch_t(&data->seek_addr,
694 trk / private->rdc_data.trk_per_cyl,
695 trk % private->rdc_data.trk_per_cyl);
696 data->search_arg.cyl = data->seek_addr.cyl;
697 data->search_arg.head = data->seek_addr.head;
698 data->search_arg.record = rec_on_trk;
699 }
700
701 /*
702 * Returns 1 if the block is one of the special blocks that needs
703 * to get read/written with the KD variant of the command.
704 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
705 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
706 * Luckily the KD variants differ only by one bit (0x08) from the
707 * normal variant. So don't wonder about code like:
708 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
709 * ccw->cmd_code |= 0x8;
710 */
711 static inline int
712 dasd_eckd_cdl_special(int blk_per_trk, int recid)
713 {
714 if (recid < 3)
715 return 1;
716 if (recid < blk_per_trk)
717 return 0;
718 if (recid < 2 * blk_per_trk)
719 return 1;
720 return 0;
721 }
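/*
 * Example, assuming 12 blocks per track (4k formatted 3390): recids
 * 0-2 (track 0) and 12-23 (track 1) are CDL special blocks, while
 * recids 3-11 are regular blocks.
 */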
722
723 /*
724 * Returns the record size for the special blocks of the cdl format.
725 * Only returns something useful if dasd_eckd_cdl_special is true
726 * for the recid.
727 */
728 static inline int
729 dasd_eckd_cdl_reclen(int recid)
730 {
731 if (recid < 3)
732 return sizes_trk0[recid];
733 return LABEL_SIZE;
734 }
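/*
 * With the sizes_trk0[] table above this means: recid 0 -> 28 bytes,
 * 1 -> 148, 2 -> 84, and every other special record uses LABEL_SIZE
 * (140 bytes).
 */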
735 /* create unique id from private structure. */
736 static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
737 {
738 int count;
739
740 memset(uid, 0, sizeof(struct dasd_uid));
741 memcpy(uid->vendor, conf->ned->HDA_manufacturer,
742 sizeof(uid->vendor) - 1);
743 EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
744 memcpy(uid->serial, &conf->ned->serial,
745 sizeof(uid->serial) - 1);
746 EBCASC(uid->serial, sizeof(uid->serial) - 1);
747 uid->ssid = conf->gneq->subsystemID;
748 uid->real_unit_addr = conf->ned->unit_addr;
749 if (conf->sneq) {
750 uid->type = conf->sneq->sua_flags;
751 if (uid->type == UA_BASE_PAV_ALIAS)
752 uid->base_unit_addr = conf->sneq->base_unit_addr;
753 } else {
754 uid->type = UA_BASE_DEVICE;
755 }
756 if (conf->vdsneq) {
757 for (count = 0; count < 16; count++) {
758 sprintf(uid->vduit+2*count, "%02x",
759 conf->vdsneq->uit[count]);
760 }
761 }
762 }
763
764 /*
765 * Generate device unique id that specifies the physical device.
766 */
767 static int dasd_eckd_generate_uid(struct dasd_device *device)
768 {
769 struct dasd_eckd_private *private = device->private;
770 unsigned long flags;
771
772 if (!private)
773 return -ENODEV;
774 if (!private->conf.ned || !private->conf.gneq)
775 return -ENODEV;
776 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
777 create_uid(&private->conf, &private->uid);
778 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
779 return 0;
780 }
781
782 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
783 {
784 struct dasd_eckd_private *private = device->private;
785 unsigned long flags;
786
787 if (private) {
788 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
789 *uid = private->uid;
790 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
791 return 0;
792 }
793 return -EINVAL;
794 }
795
796 /*
797 * compare device UID with data of a given dasd_eckd_private structure
798 * return 0 for match
799 */
800 static int dasd_eckd_compare_path_uid(struct dasd_device *device,
801 struct dasd_conf *path_conf)
802 {
803 struct dasd_uid device_uid;
804 struct dasd_uid path_uid;
805
806 create_uid(path_conf, &path_uid);
807 dasd_eckd_get_uid(device, &device_uid);
808
809 return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
810 }
811
812 static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
813 struct dasd_ccw_req *cqr,
814 __u8 *rcd_buffer,
815 __u8 lpm)
816 {
817 struct ccw1 *ccw;
818 /*
819 * buffer has to start with EBCDIC "V1.0" to show
820 * support for virtual device SNEQ
821 */
822 rcd_buffer[0] = 0xE5;
823 rcd_buffer[1] = 0xF1;
824 rcd_buffer[2] = 0x4B;
825 rcd_buffer[3] = 0xF0;
826
827 ccw = cqr->cpaddr;
828 ccw->cmd_code = DASD_ECKD_CCW_RCD;
829 ccw->flags = 0;
830 ccw->cda = virt_to_dma32(rcd_buffer);
831 ccw->count = DASD_ECKD_RCD_DATA_SIZE;
832 cqr->magic = DASD_ECKD_MAGIC;
833
834 cqr->startdev = device;
835 cqr->memdev = device;
836 cqr->block = NULL;
837 cqr->expires = 10*HZ;
838 cqr->lpm = lpm;
839 cqr->retries = 256;
840 cqr->buildclk = get_tod_clock();
841 cqr->status = DASD_CQR_FILLED;
842 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
843 }
844
845 /*
846 * Wakeup helper for read_conf.
847 * If the cqr is not done and needs some error recovery,
848 * the buffer has to be re-initialized with the EBCDIC "V1.0"
849 * to show support for virtual device SNEQ
850 */
851 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
852 {
853 struct ccw1 *ccw;
854 __u8 *rcd_buffer;
855
856 if (cqr->status != DASD_CQR_DONE) {
857 ccw = cqr->cpaddr;
858 rcd_buffer = dma32_to_virt(ccw->cda);
859 memset(rcd_buffer, 0, sizeof(*rcd_buffer));
860
861 rcd_buffer[0] = 0xE5;
862 rcd_buffer[1] = 0xF1;
863 rcd_buffer[2] = 0x4B;
864 rcd_buffer[3] = 0xF0;
865 }
866 dasd_wakeup_cb(cqr, data);
867 }
868
869 static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
870 struct dasd_ccw_req *cqr,
871 __u8 *rcd_buffer,
872 __u8 lpm)
873 {
874 struct ciw *ciw;
875 int rc;
876 /*
877 * sanity check: scan for RCD command in extended SenseID data;
878 * some devices do not support RCD
879 */
880 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
881 if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
882 return -EOPNOTSUPP;
883
884 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
885 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
886 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
887 cqr->retries = 5;
888 cqr->callback = read_conf_cb;
889 rc = dasd_sleep_on_immediatly(cqr);
890 return rc;
891 }
892
893 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
894 void **rcd_buffer,
895 int *rcd_buffer_size, __u8 lpm)
896 {
897 struct ciw *ciw;
898 char *rcd_buf = NULL;
899 int ret;
900 struct dasd_ccw_req *cqr;
901
902 /*
903 * sanity check: scan for RCD command in extended SenseID data;
904 * some devices do not support RCD
905 */
906 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
907 if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
908 ret = -EOPNOTSUPP;
909 goto out_error;
910 }
911 rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
912 if (!rcd_buf) {
913 ret = -ENOMEM;
914 goto out_error;
915 }
916 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
917 0, /* use rcd_buf as data area */
918 device, NULL);
919 if (IS_ERR(cqr)) {
920 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
921 "Could not allocate RCD request");
922 ret = -ENOMEM;
923 goto out_error;
924 }
925 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
926 cqr->callback = read_conf_cb;
927 ret = dasd_sleep_on(cqr);
928 /*
929 * on success we update the user input parms
930 */
931 dasd_sfree_request(cqr, cqr->memdev);
932 if (ret)
933 goto out_error;
934
935 *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
936 *rcd_buffer = rcd_buf;
937 return 0;
938 out_error:
939 kfree(rcd_buf);
940 *rcd_buffer = NULL;
941 *rcd_buffer_size = 0;
942 return ret;
943 }
944
945 static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
946 {
947
948 struct dasd_sneq *sneq;
949 int i, count;
950
951 conf->ned = NULL;
952 conf->sneq = NULL;
953 conf->vdsneq = NULL;
954 conf->gneq = NULL;
955 count = conf->len / sizeof(struct dasd_sneq);
956 sneq = (struct dasd_sneq *)conf->data;
957 for (i = 0; i < count; ++i) {
958 if (sneq->flags.identifier == 1 && sneq->format == 1)
959 conf->sneq = sneq;
960 else if (sneq->flags.identifier == 1 && sneq->format == 4)
961 conf->vdsneq = (struct vd_sneq *)sneq;
962 else if (sneq->flags.identifier == 2)
963 conf->gneq = (struct dasd_gneq *)sneq;
964 else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
965 conf->ned = (struct dasd_ned *)sneq;
966 sneq++;
967 }
968 if (!conf->ned || !conf->gneq) {
969 conf->ned = NULL;
970 conf->sneq = NULL;
971 conf->vdsneq = NULL;
972 conf->gneq = NULL;
973 return -EINVAL;
974 }
975 return 0;
976
977 };
978
979 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
980 {
981 struct dasd_gneq *gneq;
982 int i, count, found;
983
984 count = conf_len / sizeof(*gneq);
985 gneq = (struct dasd_gneq *)conf_data;
986 found = 0;
987 for (i = 0; i < count; ++i) {
988 if (gneq->flags.identifier == 2) {
989 found = 1;
990 break;
991 }
992 gneq++;
993 }
994 if (found)
995 return ((char *)gneq)[18] & 0x07;
996 else
997 return 0;
998 }
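/*
 * The access mode returned here is taken from the low three bits of
 * byte 18 of the generic NEQ; the callers below treat 0x02 as a
 * non-preferred and 0x03 as a preferred path (see the switch in
 * dasd_eckd_read_conf()).
 */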
999
1000 static void dasd_eckd_store_conf_data(struct dasd_device *device,
1001 struct dasd_conf_data *conf_data, int chp)
1002 {
1003 struct dasd_eckd_private *private = device->private;
1004 struct channel_path_desc_fmt0 *chp_desc;
1005 struct subchannel_id sch_id;
1006 void *cdp;
1007
1008 /*
1009 * path handling and read_conf allocate data;
1010 * free it before replacing the pointer and
1011 * also replace the old private->conf_data pointer
1012 * with the new one if this points to the same data
1013 */
1014 cdp = device->path[chp].conf_data;
1015 if (private->conf.data == cdp) {
1016 private->conf.data = (void *)conf_data;
1017 dasd_eckd_identify_conf_parts(&private->conf);
1018 }
1019 ccw_device_get_schid(device->cdev, &sch_id);
1020 device->path[chp].conf_data = conf_data;
1021 device->path[chp].cssid = sch_id.cssid;
1022 device->path[chp].ssid = sch_id.ssid;
1023 chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
1024 if (chp_desc)
1025 device->path[chp].chpid = chp_desc->chpid;
1026 kfree(chp_desc);
1027 kfree(cdp);
1028 }
1029
1030 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
1031 {
1032 struct dasd_eckd_private *private = device->private;
1033 int i;
1034
1035 private->conf.data = NULL;
1036 private->conf.len = 0;
1037 for (i = 0; i < 8; i++) {
1038 kfree(device->path[i].conf_data);
1039 device->path[i].conf_data = NULL;
1040 device->path[i].cssid = 0;
1041 device->path[i].ssid = 0;
1042 device->path[i].chpid = 0;
1043 dasd_path_notoper(device, i);
1044 }
1045 }
1046
1047 static void dasd_eckd_read_fc_security(struct dasd_device *device)
1048 {
1049 struct dasd_eckd_private *private = device->private;
1050 u8 esm_valid;
1051 u8 esm[8];
1052 int chp;
1053 int rc;
1054
1055 rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
1056 if (rc) {
1057 for (chp = 0; chp < 8; chp++)
1058 device->path[chp].fc_security = 0;
1059 return;
1060 }
1061
1062 for (chp = 0; chp < 8; chp++) {
1063 if (esm_valid & (0x80 >> chp))
1064 device->path[chp].fc_security = esm[chp];
1065 else
1066 device->path[chp].fc_security = 0;
1067 }
1068 }
1069
1070 static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid)
1071 {
1072 struct dasd_uid uid;
1073
1074 create_uid(conf, &uid);
1075 snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s",
1076 uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr,
1077 uid.vduit[0] ? "." : "", uid.vduit);
1078 }
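/*
 * The resulting UID string has the form vendor.serial.ssid.unit_addr
 * with an optional vduit suffix, e.g. (hypothetical values)
 * "IBM.750000000ABC01.1234.0a" for a base device.
 */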
1079
1080 static int dasd_eckd_check_cabling(struct dasd_device *device,
1081 void *conf_data, __u8 lpm)
1082 {
1083 char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
1084 struct dasd_eckd_private *private = device->private;
1085 struct dasd_conf path_conf;
1086
1087 path_conf.data = conf_data;
1088 path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
1089 if (dasd_eckd_identify_conf_parts(&path_conf))
1090 return 1;
1091
1092 if (dasd_eckd_compare_path_uid(device, &path_conf)) {
1093 dasd_eckd_get_uid_string(&path_conf, print_path_uid);
1094 dasd_eckd_get_uid_string(&private->conf, print_device_uid);
1095 dev_err(&device->cdev->dev,
1096 "Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
1097 lpm, print_path_uid, print_device_uid);
1098 return 1;
1099 }
1100
1101 return 0;
1102 }
1103
1104 static int dasd_eckd_read_conf(struct dasd_device *device)
1105 {
1106 void *conf_data;
1107 int conf_len, conf_data_saved;
1108 int rc, path_err, pos;
1109 __u8 lpm, opm;
1110 struct dasd_eckd_private *private;
1111
1112 private = device->private;
1113 opm = ccw_device_get_path_mask(device->cdev);
1114 conf_data_saved = 0;
1115 path_err = 0;
1116 /* get configuration data per operational path */
1117 for (lpm = 0x80; lpm; lpm>>= 1) {
1118 if (!(lpm & opm))
1119 continue;
1120 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
1121 &conf_len, lpm);
1122 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
1123 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1124 "Read configuration data returned "
1125 "error %d", rc);
1126 return rc;
1127 }
1128 if (conf_data == NULL) {
1129 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1130 "No configuration data "
1131 "retrieved");
1132 /* no further analysis possible */
1133 dasd_path_add_opm(device, opm);
1134 continue; /* no error */
1135 }
1136 /* save first valid configuration data */
1137 if (!conf_data_saved) {
1138 /* initially clear previously stored conf_data */
1139 dasd_eckd_clear_conf_data(device);
1140 private->conf.data = conf_data;
1141 private->conf.len = conf_len;
1142 if (dasd_eckd_identify_conf_parts(&private->conf)) {
1143 private->conf.data = NULL;
1144 private->conf.len = 0;
1145 kfree(conf_data);
1146 continue;
1147 }
1148 /*
1149 * build the device UID so that other path data
1150 * can be compared to it
1151 */
1152 dasd_eckd_generate_uid(device);
1153 conf_data_saved++;
1154 } else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
1155 dasd_path_add_cablepm(device, lpm);
1156 path_err = -EINVAL;
1157 kfree(conf_data);
1158 continue;
1159 }
1160
1161 pos = pathmask_to_pos(lpm);
1162 dasd_eckd_store_conf_data(device, conf_data, pos);
1163
1164 switch (dasd_eckd_path_access(conf_data, conf_len)) {
1165 case 0x02:
1166 dasd_path_add_nppm(device, lpm);
1167 break;
1168 case 0x03:
1169 dasd_path_add_ppm(device, lpm);
1170 break;
1171 }
1172 if (!dasd_path_get_opm(device)) {
1173 dasd_path_set_opm(device, lpm);
1174 dasd_generic_path_operational(device);
1175 } else {
1176 dasd_path_add_opm(device, lpm);
1177 }
1178 }
1179
1180 return path_err;
1181 }
1182
1183 static u32 get_fcx_max_data(struct dasd_device *device)
1184 {
1185 struct dasd_eckd_private *private = device->private;
1186 int fcx_in_css, fcx_in_gneq, fcx_in_features;
1187 unsigned int mdc;
1188 int tpm;
1189
1190 if (dasd_nofcx)
1191 return 0;
1192 /* is transport mode supported? */
1193 fcx_in_css = css_general_characteristics.fcx;
1194 fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
1195 fcx_in_features = private->features.feature[40] & 0x80;
1196 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1197
1198 if (!tpm)
1199 return 0;
1200
1201 mdc = ccw_device_get_mdc(device->cdev, 0);
1202 if (mdc == 0) {
1203 dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1204 return 0;
1205 } else {
1206 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1207 }
1208 }
1209
1210 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1211 {
1212 struct dasd_eckd_private *private = device->private;
1213 unsigned int mdc;
1214 u32 fcx_max_data;
1215
1216 if (private->fcx_max_data) {
1217 mdc = ccw_device_get_mdc(device->cdev, lpm);
1218 if (mdc == 0) {
1219 dev_warn(&device->cdev->dev,
1220 "Detecting the maximum data size for zHPF "
1221 "requests failed (rc=%d) for a new path %x\n",
1222 mdc, lpm);
1223 return mdc;
1224 }
1225 fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
1226 if (fcx_max_data < private->fcx_max_data) {
1227 dev_warn(&device->cdev->dev,
1228 "The maximum data size for zHPF requests %u "
1229 "on a new path %x is below the active maximum "
1230 "%u\n", fcx_max_data, lpm,
1231 private->fcx_max_data);
1232 return -EACCES;
1233 }
1234 }
1235 return 0;
1236 }
1237
1238 static int rebuild_device_uid(struct dasd_device *device,
1239 struct pe_handler_work_data *data)
1240 {
1241 struct dasd_eckd_private *private = device->private;
1242 __u8 lpm, opm = dasd_path_get_opm(device);
1243 int rc = -ENODEV;
1244
1245 for (lpm = 0x80; lpm; lpm >>= 1) {
1246 if (!(lpm & opm))
1247 continue;
1248 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1249 memset(&data->cqr, 0, sizeof(data->cqr));
1250 data->cqr.cpaddr = &data->ccw;
1251 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1252 data->rcd_buffer,
1253 lpm);
1254
1255 if (rc) {
1256 if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
1257 continue;
1258 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1259 "Read configuration data "
1260 "returned error %d", rc);
1261 break;
1262 }
1263 memcpy(private->conf.data, data->rcd_buffer,
1264 DASD_ECKD_RCD_DATA_SIZE);
1265 if (dasd_eckd_identify_conf_parts(&private->conf)) {
1266 rc = -ENODEV;
1267 } else /* first valid path is enough */
1268 break;
1269 }
1270
1271 if (!rc)
1272 rc = dasd_eckd_generate_uid(device);
1273
1274 return rc;
1275 }
1276
1277 static void dasd_eckd_path_available_action(struct dasd_device *device,
1278 struct pe_handler_work_data *data)
1279 {
1280 __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
1281 __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
1282 struct dasd_conf_data *conf_data;
1283 char print_uid[DASD_UID_STRLEN];
1284 struct dasd_conf path_conf;
1285 unsigned long flags;
1286 int rc, pos;
1287
1288 opm = 0;
1289 npm = 0;
1290 ppm = 0;
1291 epm = 0;
1292 hpfpm = 0;
1293 cablepm = 0;
1294
1295 for (lpm = 0x80; lpm; lpm >>= 1) {
1296 if (!(lpm & data->tbvpm))
1297 continue;
1298 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1299 memset(&data->cqr, 0, sizeof(data->cqr));
1300 data->cqr.cpaddr = &data->ccw;
1301 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1302 data->rcd_buffer,
1303 lpm);
1304 if (!rc) {
1305 switch (dasd_eckd_path_access(data->rcd_buffer,
1306 DASD_ECKD_RCD_DATA_SIZE)
1307 ) {
1308 case 0x02:
1309 npm |= lpm;
1310 break;
1311 case 0x03:
1312 ppm |= lpm;
1313 break;
1314 }
1315 opm |= lpm;
1316 } else if (rc == -EOPNOTSUPP) {
1317 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1318 "path verification: No configuration "
1319 "data retrieved");
1320 opm |= lpm;
1321 } else if (rc == -EAGAIN) {
1322 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1323 "path verification: device is stopped,"
1324 " try again later");
1325 epm |= lpm;
1326 } else {
1327 dev_warn(&device->cdev->dev,
1328 "Reading device feature codes failed "
1329 "(rc=%d) for new path %x\n", rc, lpm);
1330 continue;
1331 }
1332 if (verify_fcx_max_data(device, lpm)) {
1333 opm &= ~lpm;
1334 npm &= ~lpm;
1335 ppm &= ~lpm;
1336 hpfpm |= lpm;
1337 continue;
1338 }
1339
1340 /*
1341 * save conf_data for comparison, since
1342 * rebuild_device_uid may have changed
1343 * the original data
1344 */
1345 memcpy(&path_rcd_buf, data->rcd_buffer,
1346 DASD_ECKD_RCD_DATA_SIZE);
1347 path_conf.data = (void *)&path_rcd_buf;
1348 path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
1349 if (dasd_eckd_identify_conf_parts(&path_conf)) {
1350 path_conf.data = NULL;
1351 path_conf.len = 0;
1352 continue;
1353 }
1354
1355 /*
1356 * compare path UID with device UID only if at least
1357 * one valid path is left;
1358 * otherwise the device UID may have changed and
1359 * the first working path UID will be used as device UID
1360 */
1361 if (dasd_path_get_opm(device) &&
1362 dasd_eckd_compare_path_uid(device, &path_conf)) {
1363 /*
1364 * the comparison was not successful
1365 * rebuild the device UID with at least one
1366 * known path in case a z/VM hyperswap command
1367 * has changed the device
1368 *
1369 * after this compare again
1370 *
1371 * if either the rebuild or the recompare fails
1372 * the path can not be used
1373 */
1374 if (rebuild_device_uid(device, data) ||
1375 dasd_eckd_compare_path_uid(
1376 device, &path_conf)) {
1377 dasd_eckd_get_uid_string(&path_conf, print_uid);
1378 dev_err(&device->cdev->dev,
1379 "The newly added channel path %02X "
1380 "will not be used because it leads "
1381 "to a different device %s\n",
1382 lpm, print_uid);
1383 opm &= ~lpm;
1384 npm &= ~lpm;
1385 ppm &= ~lpm;
1386 cablepm |= lpm;
1387 continue;
1388 }
1389 }
1390
1391 conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
1392 if (conf_data) {
1393 memcpy(conf_data, data->rcd_buffer,
1394 DASD_ECKD_RCD_DATA_SIZE);
1395 } else {
1396 /*
1397 * path is operational but path config data could not
1398 * be stored due to a low memory condition;
1399 * add it to the error path mask and schedule a path
1400 * verification later so that it can be added again
1401 */
1402 epm |= lpm;
1403 }
1404 pos = pathmask_to_pos(lpm);
1405 dasd_eckd_store_conf_data(device, conf_data, pos);
1406
1407 /*
1408 * There is a small chance that a path is lost again between
1409 * above path verification and the following modification of
1410 * the device opm mask. We could avoid that race here by using
1411 * yet another path mask, but we rather deal with this unlikely
1412 * situation in dasd_start_IO.
1413 */
1414 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1415 if (!dasd_path_get_opm(device) && opm) {
1416 dasd_path_set_opm(device, opm);
1417 dasd_generic_path_operational(device);
1418 } else {
1419 dasd_path_add_opm(device, opm);
1420 }
1421 dasd_path_add_nppm(device, npm);
1422 dasd_path_add_ppm(device, ppm);
1423 if (epm) {
1424 dasd_path_add_tbvpm(device, epm);
1425 dasd_device_set_timer(device, 50);
1426 }
1427 dasd_path_add_cablepm(device, cablepm);
1428 dasd_path_add_nohpfpm(device, hpfpm);
1429 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1430
1431 dasd_path_create_kobj(device, pos);
1432 }
1433 }
1434
1435 static void do_pe_handler_work(struct work_struct *work)
1436 {
1437 struct pe_handler_work_data *data;
1438 struct dasd_device *device;
1439
1440 data = container_of(work, struct pe_handler_work_data, worker);
1441 device = data->device;
1442
1443 /* delay path verification until the device has been resumed */
1444 if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
1445 schedule_work(work);
1446 return;
1447 }
1448 /* check if path verification already running and delay if so */
1449 if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
1450 schedule_work(work);
1451 return;
1452 }
1453
1454 if (data->tbvpm)
1455 dasd_eckd_path_available_action(device, data);
1456 if (data->fcsecpm)
1457 dasd_eckd_read_fc_security(device);
1458
1459 clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
1460 dasd_put_device(device);
1461 if (data->isglobal)
1462 mutex_unlock(&dasd_pe_handler_mutex);
1463 else
1464 kfree(data);
1465 }
1466
1467 static int dasd_eckd_pe_handler(struct dasd_device *device,
1468 __u8 tbvpm, __u8 fcsecpm)
1469 {
1470 struct pe_handler_work_data *data;
1471
1472 data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1473 if (!data) {
1474 if (mutex_trylock(&dasd_pe_handler_mutex)) {
1475 data = pe_handler_worker;
1476 data->isglobal = 1;
1477 } else {
1478 return -ENOMEM;
1479 }
1480 }
1481 INIT_WORK(&data->worker, do_pe_handler_work);
1482 dasd_get_device(device);
1483 data->device = device;
1484 data->tbvpm = tbvpm;
1485 data->fcsecpm = fcsecpm;
1486 schedule_work(&data->worker);
1487 return 0;
1488 }
1489
1490 static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
1491 {
1492 struct dasd_eckd_private *private = device->private;
1493 unsigned long flags;
1494
1495 if (!private->fcx_max_data)
1496 private->fcx_max_data = get_fcx_max_data(device);
1497 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1498 dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
1499 dasd_schedule_device_bh(device);
1500 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1501 }
1502
1503 static int dasd_eckd_read_features(struct dasd_device *device)
1504 {
1505 struct dasd_eckd_private *private = device->private;
1506 struct dasd_psf_prssd_data *prssdp;
1507 struct dasd_rssd_features *features;
1508 struct dasd_ccw_req *cqr;
1509 struct ccw1 *ccw;
1510 int rc;
1511
1512 memset(&private->features, 0, sizeof(struct dasd_rssd_features));
1513 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1514 (sizeof(struct dasd_psf_prssd_data) +
1515 sizeof(struct dasd_rssd_features)),
1516 device, NULL);
1517 if (IS_ERR(cqr)) {
1518 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
1519 "allocate initialization request");
1520 return PTR_ERR(cqr);
1521 }
1522 cqr->startdev = device;
1523 cqr->memdev = device;
1524 cqr->block = NULL;
1525 cqr->retries = 256;
1526 cqr->expires = 10 * HZ;
1527
1528 /* Prepare for Read Subsystem Data */
1529 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1530 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
1531 prssdp->order = PSF_ORDER_PRSSD;
1532 prssdp->suborder = 0x41; /* Read Feature Codes */
1533 /* all other bytes of prssdp must be zero */
1534
1535 ccw = cqr->cpaddr;
1536 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1537 ccw->count = sizeof(struct dasd_psf_prssd_data);
1538 ccw->flags |= CCW_FLAG_CC;
1539 ccw->cda = virt_to_dma32(prssdp);
1540
1541 /* Read Subsystem Data - feature codes */
1542 features = (struct dasd_rssd_features *) (prssdp + 1);
1543 memset(features, 0, sizeof(struct dasd_rssd_features));
1544
1545 ccw++;
1546 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1547 ccw->count = sizeof(struct dasd_rssd_features);
1548 ccw->cda = virt_to_dma32(features);
1549
1550 cqr->buildclk = get_tod_clock();
1551 cqr->status = DASD_CQR_FILLED;
1552 rc = dasd_sleep_on(cqr);
1553 if (rc == 0) {
1554 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1555 features = (struct dasd_rssd_features *) (prssdp + 1);
1556 memcpy(&private->features, features,
1557 sizeof(struct dasd_rssd_features));
1558 } else
1559 dev_warn(&device->cdev->dev, "Reading device feature codes"
1560 " failed with rc=%d\n", rc);
1561 dasd_sfree_request(cqr, cqr->memdev);
1562 return rc;
1563 }
1564
1565 /* Read Volume Information - Volume Storage Query */
1566 static int dasd_eckd_read_vol_info(struct dasd_device *device)
1567 {
1568 struct dasd_eckd_private *private = device->private;
1569 struct dasd_psf_prssd_data *prssdp;
1570 struct dasd_rssd_vsq *vsq;
1571 struct dasd_ccw_req *cqr;
1572 struct ccw1 *ccw;
1573 int useglobal;
1574 int rc;
1575
1576 /* This command cannot be executed on an alias device */
1577 if (private->uid.type == UA_BASE_PAV_ALIAS ||
1578 private->uid.type == UA_HYPER_PAV_ALIAS)
1579 return 0;
1580
1581 useglobal = 0;
1582 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1583 sizeof(*prssdp) + sizeof(*vsq), device, NULL);
1584 if (IS_ERR(cqr)) {
1585 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1586 "Could not allocate initialization request");
1587 mutex_lock(&dasd_vol_info_mutex);
1588 useglobal = 1;
1589 cqr = &dasd_vol_info_req->cqr;
1590 memset(cqr, 0, sizeof(*cqr));
1591 memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
1592 cqr->cpaddr = &dasd_vol_info_req->ccw;
1593 cqr->data = &dasd_vol_info_req->data;
1594 cqr->magic = DASD_ECKD_MAGIC;
1595 }
1596
1597 /* Prepare for Read Subsystem Data */
1598 prssdp = cqr->data;
1599 prssdp->order = PSF_ORDER_PRSSD;
1600 prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */
1601 prssdp->lss = private->conf.ned->ID;
1602 prssdp->volume = private->conf.ned->unit_addr;
1603
1604 ccw = cqr->cpaddr;
1605 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1606 ccw->count = sizeof(*prssdp);
1607 ccw->flags |= CCW_FLAG_CC;
1608 ccw->cda = virt_to_dma32(prssdp);
1609
1610 /* Read Subsystem Data - Volume Storage Query */
1611 vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
1612 memset(vsq, 0, sizeof(*vsq));
1613
1614 ccw++;
1615 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1616 ccw->count = sizeof(*vsq);
1617 ccw->flags |= CCW_FLAG_SLI;
1618 ccw->cda = virt_to_dma32(vsq);
1619
1620 cqr->buildclk = get_tod_clock();
1621 cqr->status = DASD_CQR_FILLED;
1622 cqr->startdev = device;
1623 cqr->memdev = device;
1624 cqr->block = NULL;
1625 cqr->retries = 256;
1626 cqr->expires = device->default_expires * HZ;
1627 /* The command might not be supported. Suppress the error output */
1628 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1629
1630 rc = dasd_sleep_on_interruptible(cqr);
1631 if (rc == 0) {
1632 memcpy(&private->vsq, vsq, sizeof(*vsq));
1633 } else {
1634 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1635 "Reading the volume storage information failed with rc=%d", rc);
1636 }
1637
1638 if (useglobal)
1639 mutex_unlock(&dasd_vol_info_mutex);
1640 else
1641 dasd_sfree_request(cqr, cqr->memdev);
1642
1643 return rc;
1644 }
1645
1646 static int dasd_eckd_is_ese(struct dasd_device *device)
1647 {
1648 struct dasd_eckd_private *private = device->private;
1649
1650 return private->vsq.vol_info.ese;
1651 }
1652
1653 static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1654 {
1655 struct dasd_eckd_private *private = device->private;
1656
1657 return private->vsq.extent_pool_id;
1658 }
1659
1660 /*
1661 * This value represents the total amount of available space. As more space is
1662 * allocated by ESE volumes, this value will decrease.
1663 * The data for this value is therefore updated on any call.
1664 */
1665 static int dasd_eckd_space_configured(struct dasd_device *device)
1666 {
1667 struct dasd_eckd_private *private = device->private;
1668 int rc;
1669
1670 rc = dasd_eckd_read_vol_info(device);
1671
1672 return rc ? : private->vsq.space_configured;
1673 }
1674
1675 /*
1676 * The value of space allocated by an ESE volume may have changed and is
1677 * therefore updated on any call.
1678 */
1679 static int dasd_eckd_space_allocated(struct dasd_device *device)
1680 {
1681 struct dasd_eckd_private *private = device->private;
1682 int rc;
1683
1684 rc = dasd_eckd_read_vol_info(device);
1685
1686 return rc ? : private->vsq.space_allocated;
1687 }
1688
1689 static int dasd_eckd_logical_capacity(struct dasd_device *device)
1690 {
1691 struct dasd_eckd_private *private = device->private;
1692
1693 return private->vsq.logical_capacity;
1694 }
1695
1696 static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
1697 {
1698 struct ext_pool_exhaust_work_data *data;
1699 struct dasd_device *device;
1700 struct dasd_device *base;
1701
1702 data = container_of(work, struct ext_pool_exhaust_work_data, worker);
1703 device = data->device;
1704 base = data->base;
1705
1706 if (!base)
1707 base = device;
1708 if (dasd_eckd_space_configured(base) != 0) {
1709 dasd_generic_space_avail(device);
1710 } else {
1711 dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
1712 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
1713 }
1714
1715 dasd_put_device(device);
1716 kfree(data);
1717 }
1718
1719 static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
1720 struct dasd_ccw_req *cqr)
1721 {
1722 struct ext_pool_exhaust_work_data *data;
1723
1724 data = kzalloc(sizeof(*data), GFP_ATOMIC);
1725 if (!data)
1726 return -ENOMEM;
1727 INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
1728 dasd_get_device(device);
1729 data->device = device;
1730
1731 if (cqr->block)
1732 data->base = cqr->block->base;
1733 else if (cqr->basedev)
1734 data->base = cqr->basedev;
1735 else
1736 data->base = NULL;
1737
1738 schedule_work(&data->worker);
1739
1740 return 0;
1741 }
1742
1743 static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1744 struct dasd_rssd_lcq *lcq)
1745 {
1746 struct dasd_eckd_private *private = device->private;
1747 int pool_id = dasd_eckd_ext_pool_id(device);
1748 struct dasd_ext_pool_sum eps;
1749 int i;
1750
1751 for (i = 0; i < lcq->pool_count; i++) {
1752 eps = lcq->ext_pool_sum[i];
1753 if (eps.pool_id == pool_id) {
1754 memcpy(&private->eps, &eps,
1755 sizeof(struct dasd_ext_pool_sum));
1756 }
1757 }
1758 }
1759
1760 /* Read Extent Pool Information - Logical Configuration Query */
1761 static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
1762 {
1763 struct dasd_eckd_private *private = device->private;
1764 struct dasd_psf_prssd_data *prssdp;
1765 struct dasd_rssd_lcq *lcq;
1766 struct dasd_ccw_req *cqr;
1767 struct ccw1 *ccw;
1768 int rc;
1769
1770 /* This command cannot be executed on an alias device */
1771 if (private->uid.type == UA_BASE_PAV_ALIAS ||
1772 private->uid.type == UA_HYPER_PAV_ALIAS)
1773 return 0;
1774
1775 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1776 sizeof(*prssdp) + sizeof(*lcq), device, NULL);
1777 if (IS_ERR(cqr)) {
1778 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1779 "Could not allocate initialization request");
1780 return PTR_ERR(cqr);
1781 }
1782
1783 /* Prepare for Read Subsystem Data */
1784 prssdp = cqr->data;
1785 memset(prssdp, 0, sizeof(*prssdp));
1786 prssdp->order = PSF_ORDER_PRSSD;
1787 prssdp->suborder = PSF_SUBORDER_LCQ; /* Logical Configuration Query */
1788
1789 ccw = cqr->cpaddr;
1790 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1791 ccw->count = sizeof(*prssdp);
1792 ccw->flags |= CCW_FLAG_CC;
1793 ccw->cda = virt_to_dma32(prssdp);
1794
1795 lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
1796 memset(lcq, 0, sizeof(*lcq));
1797
1798 ccw++;
1799 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1800 ccw->count = sizeof(*lcq);
1801 ccw->flags |= CCW_FLAG_SLI;
1802 ccw->cda = virt_to_dma32(lcq);
1803
1804 cqr->buildclk = get_tod_clock();
1805 cqr->status = DASD_CQR_FILLED;
1806 cqr->startdev = device;
1807 cqr->memdev = device;
1808 cqr->block = NULL;
1809 cqr->retries = 256;
1810 cqr->expires = device->default_expires * HZ;
1811 /* The command might not be supported. Suppress the error output */
1812 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1813
1814 rc = dasd_sleep_on_interruptible(cqr);
1815 if (rc == 0) {
1816 dasd_eckd_cpy_ext_pool_data(device, lcq);
1817 } else {
1818 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1819 "Reading the logical configuration failed with rc=%d", rc);
1820 }
1821
1822 dasd_sfree_request(cqr, cqr->memdev);
1823
1824 return rc;
1825 }
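/*
 * The channel program above follows the usual PSF/RSSD pattern: a Perform
 * Subsystem Function CCW writes the Read Subsystem Data request and is
 * command-chained (CCW_FLAG_CC) to a Read Subsystem Data CCW that reads
 * the response; CCW_FLAG_SLI on the read tolerates a response shorter
 * than the buffer.
 */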
1826
1827 /*
1828 * Depending on the device type, the extent size is specified either as
1829 * cylinders per extent (CKD) or size per extent (FBA).
1830 * A 1 GB extent corresponds to 1113 cylinders, and a 16 MB extent to 21 cylinders.
1831 */
1832 static int dasd_eckd_ext_size(struct dasd_device *device)
1833 {
1834 struct dasd_eckd_private *private = device->private;
1835 struct dasd_ext_pool_sum eps = private->eps;
1836
1837 if (!eps.flags.extent_size_valid)
1838 return 0;
1839 if (eps.extent_size.size_1G)
1840 return 1113;
1841 if (eps.extent_size.size_16M)
1842 return 21;
1843
1844 return 0;
1845 }
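/*
 * Back-of-the-envelope check of the constants above, assuming classic
 * 3390 geometry (15 tracks per cylinder, 56664 bytes per track):
 * 1113 cyl * 15 * 56664 bytes is roughly 0.9 GB, the traditional
 * "mod-1 gigabyte", and 21 cyl * 15 * 56664 bytes is just under 18 MB,
 * covering the nominal 16 MB extent size.
 */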
1846
1847 static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1848 {
1849 struct dasd_eckd_private *private = device->private;
1850
1851 return private->eps.warn_thrshld;
1852 }
1853
1854 static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1855 {
1856 struct dasd_eckd_private *private = device->private;
1857
1858 return private->eps.flags.capacity_at_warnlevel;
1859 }
1860
1861 /*
1862 * Extent Pool out of space
1863 */
1864 static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1865 {
1866 struct dasd_eckd_private *private = device->private;
1867
1868 return private->eps.flags.pool_oos;
1869 }
1870
1871 /*
1872 * Build CP for Perform Subsystem Function - SSC.
1873 */
1874 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1875 int enable_pav)
1876 {
1877 struct dasd_ccw_req *cqr;
1878 struct dasd_psf_ssc_data *psf_ssc_data;
1879 struct ccw1 *ccw;
1880
1881 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1882 sizeof(struct dasd_psf_ssc_data),
1883 device, NULL);
1884
1885 if (IS_ERR(cqr)) {
1886 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1887 "Could not allocate PSF-SSC request");
1888 return cqr;
1889 }
1890 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1891 psf_ssc_data->order = PSF_ORDER_SSC;
1892 psf_ssc_data->suborder = 0xc0;
1893 if (enable_pav) {
1894 psf_ssc_data->suborder |= 0x08;
1895 psf_ssc_data->reserved[0] = 0x88;
1896 }
1897 ccw = cqr->cpaddr;
1898 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1899 ccw->cda = virt_to_dma32(psf_ssc_data);
1900 ccw->count = 66;
1901
1902 cqr->startdev = device;
1903 cqr->memdev = device;
1904 cqr->block = NULL;
1905 cqr->retries = 256;
1906 cqr->expires = 10*HZ;
1907 cqr->buildclk = get_tod_clock();
1908 cqr->status = DASD_CQR_FILLED;
1909 return cqr;
1910 }
1911
1912 /*
1913 * Perform Subsystem Function.
1914 * It is necessary to trigger CIO for channel revalidation since this
1915 * call might change behaviour of DASD devices.
1916 */
1917 static int
1918 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1919 unsigned long flags)
1920 {
1921 struct dasd_ccw_req *cqr;
1922 int rc;
1923
1924 cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1925 if (IS_ERR(cqr))
1926 return PTR_ERR(cqr);
1927
1928 /*
1929 * set flags, e.g. turn on failfast, to prevent blocking;
1930 * the calling function should handle failed requests
1931 */
1932 cqr->flags |= flags;
1933
1934 rc = dasd_sleep_on(cqr);
1935 if (!rc)
1936 /* trigger CIO to reprobe devices */
1937 css_schedule_reprobe();
1938 else if (cqr->intrc == -EAGAIN)
1939 rc = -EAGAIN;
1940
1941 dasd_sfree_request(cqr, cqr->memdev);
1942 return rc;
1943 }
1944
1945 /*
1946 * Validate the storage server of the current device.
1947 */
1948 static int dasd_eckd_validate_server(struct dasd_device *device,
1949 unsigned long flags)
1950 {
1951 struct dasd_eckd_private *private = device->private;
1952 int enable_pav, rc;
1953
1954 if (private->uid.type == UA_BASE_PAV_ALIAS ||
1955 private->uid.type == UA_HYPER_PAV_ALIAS)
1956 return 0;
1957 if (dasd_nopav || machine_is_vm())
1958 enable_pav = 0;
1959 else
1960 enable_pav = 1;
1961 rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1962
1963 /* maybe the requested feature is not available on the server,
1964 * therefore just report the error and go ahead */
1965 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1966 "returned rc=%d", private->uid.ssid, rc);
1967 return rc;
1968 }
1969
1970 /*
1971 * worker to do a validate server in case of a lost pathgroup
1972 */
1973 static void dasd_eckd_do_validate_server(struct work_struct *work)
1974 {
1975 struct dasd_device *device = container_of(work, struct dasd_device,
1976 kick_validate);
1977 unsigned long flags = 0;
1978
1979 set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
1980 if (dasd_eckd_validate_server(device, flags)
1981 == -EAGAIN) {
1982 /* schedule worker again if failed */
1983 schedule_work(&device->kick_validate);
1984 return;
1985 }
1986
1987 dasd_put_device(device);
1988 }
1989
1990 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
1991 {
1992 dasd_get_device(device);
1993 /* exit if device not online or in offline processing */
1994 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1995 device->state < DASD_STATE_ONLINE) {
1996 dasd_put_device(device);
1997 return;
1998 }
1999 /* queue call to do_validate_server to the kernel event daemon. */
2000 if (!schedule_work(&device->kick_validate))
2001 dasd_put_device(device);
2002 }
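/*
 * Note the reference counting above: one device reference is taken for
 * the worker before scheduling. schedule_work() returns false if the
 * work was already queued, in which case the extra reference is dropped
 * immediately; otherwise dasd_eckd_do_validate_server() releases it when
 * it finishes, or keeps it across the reschedule on -EAGAIN.
 */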
2003
2004 /*
2005 * Return 1 if the device is the copy relation primary, or if no copy relation is active; return 0 otherwise.
2006 */
2007 static int dasd_device_is_primary(struct dasd_device *device)
2008 {
2009 if (!device->copy)
2010 return 1;
2011
2012 if (device->copy->active->device == device)
2013 return 1;
2014
2015 return 0;
2016 }
2017
2018 static int dasd_eckd_alloc_block(struct dasd_device *device)
2019 {
2020 struct dasd_block *block;
2021 struct dasd_uid temp_uid;
2022
2023 if (!dasd_device_is_primary(device))
2024 return 0;
2025
2026 dasd_eckd_get_uid(device, &temp_uid);
2027 if (temp_uid.type == UA_BASE_DEVICE) {
2028 block = dasd_alloc_block();
2029 if (IS_ERR(block)) {
2030 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
2031 "could not allocate dasd block structure");
2032 return PTR_ERR(block);
2033 }
2034 device->block = block;
2035 block->base = device;
2036 }
2037 return 0;
2038 }
2039
2040 static bool dasd_eckd_pprc_enabled(struct dasd_device *device)
2041 {
2042 struct dasd_eckd_private *private = device->private;
2043
2044 return private->rdc_data.facilities.PPRC_enabled;
2045 }
2046
2047 /*
2048 * Check device characteristics.
2049 * If the device is accessible using ECKD discipline, the device is enabled.
2050 */
2051 static int
2052 dasd_eckd_check_characteristics(struct dasd_device *device)
2053 {
2054 struct dasd_eckd_private *private = device->private;
2055 int rc, i;
2056 int readonly;
2057 unsigned long value;
2058
2059 /* setup work queue for validate server */
2060 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
2061 /* setup work queue for summary unit check */
2062 INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
2063
2064 if (!ccw_device_is_pathgroup(device->cdev)) {
2065 dev_warn(&device->cdev->dev,
2066 "A channel path group could not be established\n");
2067 return -EIO;
2068 }
2069 if (!ccw_device_is_multipath(device->cdev)) {
2070 dev_info(&device->cdev->dev,
2071 "The DASD is not operating in multipath mode\n");
2072 }
2073 if (!private) {
2074 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
2075 if (!private) {
2076 dev_warn(&device->cdev->dev,
2077 "Allocating memory for private DASD data "
2078 "failed\n");
2079 return -ENOMEM;
2080 }
2081 device->private = private;
2082 } else {
2083 memset(private, 0, sizeof(*private));
2084 }
2085 /* Invalidate status of initial analysis. */
2086 private->init_cqr_status = -1;
2087 /* Set default cache operations. */
2088 private->attrib.operation = DASD_NORMAL_CACHE;
2089 private->attrib.nr_cyl = 0;
2090
2091 /* Read Configuration Data */
2092 rc = dasd_eckd_read_conf(device);
2093 if (rc)
2094 goto out_err1;
2095
2096 /* set some default values */
2097 device->default_expires = DASD_EXPIRES;
2098 device->default_retries = DASD_RETRIES;
2099 device->path_thrhld = DASD_ECKD_PATH_THRHLD;
2100 device->path_interval = DASD_ECKD_PATH_INTERVAL;
2101 device->aq_timeouts = DASD_RETRIES_MAX;
2102
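/*
 * The GNEQ encodes the device expiration time as number * 10^value
 * seconds, e.g. number = 3 and value = 2 yield 300 seconds; values of
 * zero or above DASD_EXPIRES_MAX are rejected below.
 */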
2103 if (private->conf.gneq) {
2104 value = 1;
2105 for (i = 0; i < private->conf.gneq->timeout.value; i++)
2106 value = 10 * value;
2107 value = value * private->conf.gneq->timeout.number;
2108 /* do not accept useless values */
2109 if (value != 0 && value <= DASD_EXPIRES_MAX)
2110 device->default_expires = value;
2111 }
2112
2113 /* Read Device Characteristics */
2114 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
2115 &private->rdc_data, 64);
2116 if (rc) {
2117 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2118 "Read device characteristic failed, rc=%d", rc);
2119 goto out_err1;
2120 }
2121
2122 /* setup PPRC for device from devmap */
2123 rc = dasd_devmap_set_device_copy_relation(device->cdev,
2124 dasd_eckd_pprc_enabled(device));
2125 if (rc) {
2126 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2127 "copy relation setup failed, rc=%d", rc);
2128 goto out_err1;
2129 }
2130
2131 /* check if block device is needed and allocate in case */
2132 rc = dasd_eckd_alloc_block(device);
2133 if (rc)
2134 goto out_err1;
2135
2136 /* register lcu with alias handling, enable PAV */
2137 rc = dasd_alias_make_device_known_to_lcu(device);
2138 if (rc)
2139 goto out_err2;
2140
2141 dasd_eckd_validate_server(device, 0);
2142
2143 /* device may report different configuration data after LCU setup */
2144 rc = dasd_eckd_read_conf(device);
2145 if (rc)
2146 goto out_err3;
2147
2148 dasd_eckd_read_fc_security(device);
2149 dasd_path_create_kobjects(device);
2150
2151 /* Read Feature Codes */
2152 dasd_eckd_read_features(device);
2153
2154 /* Read Volume Information */
2155 dasd_eckd_read_vol_info(device);
2156
2157 /* Read Extent Pool Information */
2158 dasd_eckd_read_ext_pool_info(device);
2159
2160 if ((device->features & DASD_FEATURE_USERAW) &&
2161 !(private->rdc_data.facilities.RT_in_LR)) {
2162 dev_err(&device->cdev->dev, "The storage server does not "
2163 "support raw-track access\n");
2164 rc = -EINVAL;
2165 goto out_err3;
2166 }
2167
2168 /* find the valid cylinder size */
2169 if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
2170 private->rdc_data.long_no_cyl)
2171 private->real_cyl = private->rdc_data.long_no_cyl;
2172 else
2173 private->real_cyl = private->rdc_data.no_cyl;
2174
2175 private->fcx_max_data = get_fcx_max_data(device);
2176
2177 readonly = dasd_device_is_ro(device);
2178 if (readonly)
2179 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
2180
2181 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
2182 "with %d cylinders, %d heads, %d sectors%s\n",
2183 private->rdc_data.dev_type,
2184 private->rdc_data.dev_model,
2185 private->rdc_data.cu_type,
2186 private->rdc_data.cu_model.model,
2187 private->real_cyl,
2188 private->rdc_data.trk_per_cyl,
2189 private->rdc_data.sec_per_trk,
2190 readonly ? ", read-only device" : "");
2191 return 0;
2192
2193 out_err3:
2194 dasd_alias_disconnect_device_from_lcu(device);
2195 out_err2:
2196 dasd_free_block(device->block);
2197 device->block = NULL;
2198 out_err1:
2199 dasd_eckd_clear_conf_data(device);
2200 dasd_path_remove_kobjects(device);
2201 kfree(device->private);
2202 device->private = NULL;
2203 return rc;
2204 }
2205
2206 static void dasd_eckd_uncheck_device(struct dasd_device *device)
2207 {
2208 struct dasd_eckd_private *private = device->private;
2209
2210 if (!private)
2211 return;
2212
2213 dasd_alias_disconnect_device_from_lcu(device);
2214 private->conf.ned = NULL;
2215 private->conf.sneq = NULL;
2216 private->conf.vdsneq = NULL;
2217 private->conf.gneq = NULL;
2218 dasd_eckd_clear_conf_data(device);
2219 dasd_path_remove_kobjects(device);
2220 }
2221
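/*
 * The analysis channel program built below consists of eight CCWs: one
 * Define Extent covering tracks 0 and 1, a Locate Record plus four Read
 * Count CCWs for the first four records on track 0, and a Locate Record
 * plus one Read Count for the first record on track 1. The five count
 * areas read here are evaluated later in dasd_eckd_end_analysis().
 */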
2222 static struct dasd_ccw_req *
2223 dasd_eckd_analysis_ccw(struct dasd_device *device)
2224 {
2225 struct dasd_eckd_private *private = device->private;
2226 struct eckd_count *count_data;
2227 struct LO_eckd_data *LO_data;
2228 struct dasd_ccw_req *cqr;
2229 struct ccw1 *ccw;
2230 int cplength, datasize;
2231 int i;
2232
2233 cplength = 8;
2234 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
2235 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
2236 NULL);
2237 if (IS_ERR(cqr))
2238 return cqr;
2239 ccw = cqr->cpaddr;
2240 /* Define extent for the first 2 tracks. */
2241 define_extent(ccw++, cqr->data, 0, 1,
2242 DASD_ECKD_CCW_READ_COUNT, device, 0);
2243 LO_data = cqr->data + sizeof(struct DE_eckd_data);
2244 /* Locate record for the first 4 records on track 0. */
2245 ccw[-1].flags |= CCW_FLAG_CC;
2246 locate_record(ccw++, LO_data++, 0, 0, 4,
2247 DASD_ECKD_CCW_READ_COUNT, device, 0);
2248
2249 count_data = private->count_area;
2250 for (i = 0; i < 4; i++) {
2251 ccw[-1].flags |= CCW_FLAG_CC;
2252 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2253 ccw->flags = 0;
2254 ccw->count = 8;
2255 ccw->cda = virt_to_dma32(count_data);
2256 ccw++;
2257 count_data++;
2258 }
2259
2260 /* Locate record for the first record on track 1. */
2261 ccw[-1].flags |= CCW_FLAG_CC;
2262 locate_record(ccw++, LO_data++, 1, 0, 1,
2263 DASD_ECKD_CCW_READ_COUNT, device, 0);
2264 /* Read count ccw. */
2265 ccw[-1].flags |= CCW_FLAG_CC;
2266 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2267 ccw->flags = 0;
2268 ccw->count = 8;
2269 ccw->cda = virt_to_dma32(count_data);
2270
2271 cqr->block = NULL;
2272 cqr->startdev = device;
2273 cqr->memdev = device;
2274 cqr->retries = 255;
2275 cqr->buildclk = get_tod_clock();
2276 cqr->status = DASD_CQR_FILLED;
2277 /* Set flags to suppress output for expected errors */
2278 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2279 set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
2280
2281 return cqr;
2282 }
2283
2284 /* differentiate between 'no record found' and any other error */
2285 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2286 {
2287 char *sense;
2288 if (init_cqr->status == DASD_CQR_DONE)
2289 return INIT_CQR_OK;
2290 else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2291 init_cqr->status == DASD_CQR_FAILED) {
2292 sense = dasd_get_sense(&init_cqr->irb);
2293 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2294 return INIT_CQR_UNFORMATTED;
2295 else
2296 return INIT_CQR_ERROR;
2297 } else
2298 return INIT_CQR_ERROR;
2299 }
2300
2301 /*
2302 * This is the callback function for the init_analysis cqr. It saves
2303 * the status of the initial analysis ccw before it frees it and kicks
2304 * the device to continue the startup sequence. This will call
2305 * dasd_eckd_do_analysis again (if the device has not been marked
2306 * for deletion in the meantime).
2307 */
2308 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2309 void *data)
2310 {
2311 struct dasd_device *device = init_cqr->startdev;
2312 struct dasd_eckd_private *private = device->private;
2313
2314 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2315 dasd_sfree_request(init_cqr, device);
2316 dasd_kick_device(device);
2317 }
2318
2319 static int dasd_eckd_start_analysis(struct dasd_block *block)
2320 {
2321 struct dasd_ccw_req *init_cqr;
2322
2323 init_cqr = dasd_eckd_analysis_ccw(block->base);
2324 if (IS_ERR(init_cqr))
2325 return PTR_ERR(init_cqr);
2326 init_cqr->callback = dasd_eckd_analysis_callback;
2327 init_cqr->callback_data = NULL;
2328 init_cqr->expires = 5*HZ;
2329 /* first try without ERP, so we can later handle unformatted
2330 * devices as a special case
2331 */
2332 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2333 init_cqr->retries = 0;
2334 dasd_add_request_head(init_cqr);
2335 return -EAGAIN;
2336 }
2337
2338 static int dasd_eckd_end_analysis(struct dasd_block *block)
2339 {
2340 struct dasd_device *device = block->base;
2341 struct dasd_eckd_private *private = device->private;
2342 struct eckd_count *count_area;
2343 unsigned int sb, blk_per_trk;
2344 int status, i;
2345 struct dasd_ccw_req *init_cqr;
2346
2347 status = private->init_cqr_status;
2348 private->init_cqr_status = -1;
2349 if (status == INIT_CQR_ERROR) {
2350 /* try again, this time with full ERP */
2351 init_cqr = dasd_eckd_analysis_ccw(device);
2352 dasd_sleep_on(init_cqr);
2353 status = dasd_eckd_analysis_evaluation(init_cqr);
2354 dasd_sfree_request(init_cqr, device);
2355 }
2356
2357 if (device->features & DASD_FEATURE_USERAW) {
2358 block->bp_block = DASD_RAW_BLOCKSIZE;
2359 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2360 block->s2b_shift = 3;
2361 goto raw;
2362 }
2363
2364 if (status == INIT_CQR_UNFORMATTED) {
2365 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2366 return -EMEDIUMTYPE;
2367 } else if (status == INIT_CQR_ERROR) {
2368 dev_err(&device->cdev->dev,
2369 "Detecting the DASD disk layout failed because "
2370 "of an I/O error\n");
2371 return -EIO;
2372 }
2373
2374 private->uses_cdl = 1;
2375 /* Check Track 0 for Compatible Disk Layout */
2376 count_area = NULL;
2377 for (i = 0; i < 3; i++) {
2378 if (private->count_area[i].kl != 4 ||
2379 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2380 private->count_area[i].cyl != 0 ||
2381 private->count_area[i].head != count_area_head[i] ||
2382 private->count_area[i].record != count_area_rec[i]) {
2383 private->uses_cdl = 0;
2384 break;
2385 }
2386 }
2387 if (i == 3)
2388 count_area = &private->count_area[3];
2389
2390 if (private->uses_cdl == 0) {
2391 for (i = 0; i < 5; i++) {
2392 if ((private->count_area[i].kl != 0) ||
2393 (private->count_area[i].dl !=
2394 private->count_area[0].dl) ||
2395 private->count_area[i].cyl != 0 ||
2396 private->count_area[i].head != count_area_head[i] ||
2397 private->count_area[i].record != count_area_rec[i])
2398 break;
2399 }
2400 if (i == 5)
2401 count_area = &private->count_area[0];
2402 } else {
2403 if (private->count_area[3].record == 1)
2404 dev_warn(&device->cdev->dev,
2405 "Track 0 has no records following the VTOC\n");
2406 }
2407
2408 if (count_area != NULL && count_area->kl == 0) {
2409 /* we found nothing violating our disk layout */
2410 if (dasd_check_blocksize(count_area->dl) == 0)
2411 block->bp_block = count_area->dl;
2412 }
2413 if (block->bp_block == 0) {
2414 dev_warn(&device->cdev->dev,
2415 "The disk layout of the DASD is not supported\n");
2416 return -EMEDIUMTYPE;
2417 }
2418 block->s2b_shift = 0; /* bits to shift 512 to get a block */
2419 for (sb = 512; sb < block->bp_block; sb = sb << 1)
2420 block->s2b_shift++;
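/* e.g. a 4096 byte block size yields s2b_shift = 3 (4096 == 512 << 3) */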
2421
2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2423
2424 raw:
2425 block->blocks = ((unsigned long) private->real_cyl *
2426 private->rdc_data.trk_per_cyl *
2427 blk_per_trk);
2428
2429 dev_info(&device->cdev->dev,
2430 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2431 "%s\n", (block->bp_block >> 10),
2432 (((unsigned long) private->real_cyl *
2433 private->rdc_data.trk_per_cyl *
2434 blk_per_trk * (block->bp_block >> 9)) >> 1),
2435 ((blk_per_trk * block->bp_block) >> 10),
2436 private->uses_cdl ?
2437 "compatible disk layout" : "linux disk layout");
2438
2439 return 0;
2440 }
2441
2442 static int dasd_eckd_do_analysis(struct dasd_block *block)
2443 {
2444 struct dasd_eckd_private *private = block->base->private;
2445
2446 if (private->init_cqr_status < 0)
2447 return dasd_eckd_start_analysis(block);
2448 else
2449 return dasd_eckd_end_analysis(block);
2450 }
2451
2452 static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2453 {
2454 return dasd_alias_add_device(device);
2455 };
2456
2457 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2458 {
2459 if (cancel_work_sync(&device->reload_device))
2460 dasd_put_device(device);
2461 if (cancel_work_sync(&device->kick_validate))
2462 dasd_put_device(device);
2463
2464 return 0;
2465 };
2466
2467 static int dasd_eckd_basic_to_known(struct dasd_device *device)
2468 {
2469 return dasd_alias_remove_device(device);
2470 };
2471
2472 static int
2473 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2474 {
2475 struct dasd_eckd_private *private = block->base->private;
2476
2477 if (dasd_check_blocksize(block->bp_block) == 0) {
2478 geo->sectors = recs_per_track(&private->rdc_data,
2479 0, block->bp_block);
2480 }
2481 geo->cylinders = private->rdc_data.no_cyl;
2482 geo->heads = private->rdc_data.trk_per_cyl;
2483 return 0;
2484 }
2485
2486 /*
2487 * Build the TCW request for the format check
2488 */
2489 static struct dasd_ccw_req *
2490 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2491 int enable_pav, struct eckd_count *fmt_buffer,
2492 int rpt)
2493 {
2494 struct dasd_eckd_private *start_priv;
2495 struct dasd_device *startdev = NULL;
2496 struct tidaw *last_tidaw = NULL;
2497 struct dasd_ccw_req *cqr;
2498 struct itcw *itcw;
2499 int itcw_size;
2500 int count;
2501 int rc;
2502 int i;
2503
2504 if (enable_pav)
2505 startdev = dasd_alias_get_start_dev(base);
2506
2507 if (!startdev)
2508 startdev = base;
2509
2510 start_priv = startdev->private;
2511
2512 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2513
2514 /*
2515 * we're adding 'count' tidaws to the itcw;
2516 * calculate the corresponding itcw_size
2517 */
2518 itcw_size = itcw_calc_size(0, count, 0);
2519
2520 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2521 if (IS_ERR(cqr))
2522 return cqr;
2523
2524 start_priv->count++;
2525
2526 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2527 if (IS_ERR(itcw)) {
2528 rc = -EINVAL;
2529 goto out_err;
2530 }
2531
2532 cqr->cpaddr = itcw_get_tcw(itcw);
2533 rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2534 DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2535 sizeof(struct eckd_count),
2536 count * sizeof(struct eckd_count), 0, rpt);
2537 if (rc)
2538 goto out_err;
2539
2540 for (i = 0; i < count; i++) {
2541 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2542 sizeof(struct eckd_count));
2543 if (IS_ERR(last_tidaw)) {
2544 rc = -EINVAL;
2545 goto out_err;
2546 }
2547 }
2548
2549 last_tidaw->flags |= TIDAW_FLAGS_LAST;
2550 itcw_finalize(itcw);
2551
2552 cqr->cpmode = 1;
2553 cqr->startdev = startdev;
2554 cqr->memdev = startdev;
2555 cqr->basedev = base;
2556 cqr->retries = startdev->default_retries;
2557 cqr->expires = startdev->default_expires * HZ;
2558 cqr->buildclk = get_tod_clock();
2559 cqr->status = DASD_CQR_FILLED;
2560 /* Set flags to suppress output for expected errors */
2561 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2562
2563 return cqr;
2564
2565 out_err:
2566 dasd_sfree_request(cqr, startdev);
2567
2568 return ERR_PTR(rc);
2569 }
2570
2571 /*
2572 * Build the CCW request for the format check
2573 */
2574 static struct dasd_ccw_req *
2575 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2576 int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2577 {
2578 struct dasd_eckd_private *start_priv;
2579 struct dasd_eckd_private *base_priv;
2580 struct dasd_device *startdev = NULL;
2581 struct dasd_ccw_req *cqr;
2582 struct ccw1 *ccw;
2583 void *data;
2584 int cplength, datasize;
2585 int use_prefix;
2586 int count;
2587 int i;
2588
2589 if (enable_pav)
2590 startdev = dasd_alias_get_start_dev(base);
2591
2592 if (!startdev)
2593 startdev = base;
2594
2595 start_priv = startdev->private;
2596 base_priv = base->private;
2597
2598 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2599
2600 use_prefix = base_priv->features.feature[8] & 0x01;
2601
2602 if (use_prefix) {
2603 cplength = 1;
2604 datasize = sizeof(struct PFX_eckd_data);
2605 } else {
2606 cplength = 2;
2607 datasize = sizeof(struct DE_eckd_data) +
2608 sizeof(struct LO_eckd_data);
2609 }
2610 cplength += count;
2611
2612 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2613 if (IS_ERR(cqr))
2614 return cqr;
2615
2616 start_priv->count++;
2617 data = cqr->data;
2618 ccw = cqr->cpaddr;
2619
2620 if (use_prefix) {
2621 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2622 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2623 count, 0, 0);
2624 } else {
2625 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2626 DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2627
2628 data += sizeof(struct DE_eckd_data);
2629 ccw[-1].flags |= CCW_FLAG_CC;
2630
2631 locate_record(ccw++, data, fdata->start_unit, 0, count,
2632 DASD_ECKD_CCW_READ_COUNT, base, 0);
2633 }
2634
2635 for (i = 0; i < count; i++) {
2636 ccw[-1].flags |= CCW_FLAG_CC;
2637 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2638 ccw->flags = CCW_FLAG_SLI;
2639 ccw->count = 8;
2640 ccw->cda = virt_to_dma32(fmt_buffer);
2641 ccw++;
2642 fmt_buffer++;
2643 }
2644
2645 cqr->startdev = startdev;
2646 cqr->memdev = startdev;
2647 cqr->basedev = base;
2648 cqr->retries = DASD_RETRIES;
2649 cqr->expires = startdev->default_expires * HZ;
2650 cqr->buildclk = get_tod_clock();
2651 cqr->status = DASD_CQR_FILLED;
2652 /* Set flags to suppress output for expected errors */
2653 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2654
2655 return cqr;
2656 }
2657
2658 static struct dasd_ccw_req *
2659 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2660 struct format_data_t *fdata, int enable_pav)
2661 {
2662 struct dasd_eckd_private *base_priv;
2663 struct dasd_eckd_private *start_priv;
2664 struct dasd_ccw_req *fcp;
2665 struct eckd_count *ect;
2666 struct ch_t address;
2667 struct ccw1 *ccw;
2668 void *data;
2669 int rpt;
2670 int cplength, datasize;
2671 int i, j;
2672 int intensity = 0;
2673 int r0_perm;
2674 int nr_tracks;
2675 int use_prefix;
2676
2677 if (enable_pav)
2678 startdev = dasd_alias_get_start_dev(base);
2679
2680 if (!startdev)
2681 startdev = base;
2682
2683 start_priv = startdev->private;
2684 base_priv = base->private;
2685
2686 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2687
2688 nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2689
2690 /*
2691 * fdata->intensity is a bit string that tells us what to do:
2692 * Bit 0: write record zero
2693 * Bit 1: write home address, currently not supported
2694 * Bit 2: invalidate tracks
2695 * Bit 3: use OS/390 compatible disk layout (cdl)
2696 * Bit 4: do not allow storage subsystem to modify record zero
2697 * Only some bit combinations make sense.
2698 */
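/*
 * For example, an intensity of 0x09 writes record zero and formats the
 * tracks using the compatible disk layout, while the 0x10 bit is masked
 * off below and only withholds the subsystem's permission to modify
 * record zero in the DE/PFX data.
 */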
2699 if (fdata->intensity & 0x10) {
2700 r0_perm = 0;
2701 intensity = fdata->intensity & ~0x10;
2702 } else {
2703 r0_perm = 1;
2704 intensity = fdata->intensity;
2705 }
2706
2707 use_prefix = base_priv->features.feature[8] & 0x01;
2708
2709 switch (intensity) {
2710 case 0x00: /* Normal format */
2711 case 0x08: /* Normal format, use cdl. */
2712 cplength = 2 + (rpt*nr_tracks);
2713 if (use_prefix)
2714 datasize = sizeof(struct PFX_eckd_data) +
2715 sizeof(struct LO_eckd_data) +
2716 rpt * nr_tracks * sizeof(struct eckd_count);
2717 else
2718 datasize = sizeof(struct DE_eckd_data) +
2719 sizeof(struct LO_eckd_data) +
2720 rpt * nr_tracks * sizeof(struct eckd_count);
2721 break;
2722 case 0x01: /* Write record zero and format track. */
2723 case 0x09: /* Write record zero and format track, use cdl. */
2724 cplength = 2 + rpt * nr_tracks;
2725 if (use_prefix)
2726 datasize = sizeof(struct PFX_eckd_data) +
2727 sizeof(struct LO_eckd_data) +
2728 sizeof(struct eckd_count) +
2729 rpt * nr_tracks * sizeof(struct eckd_count);
2730 else
2731 datasize = sizeof(struct DE_eckd_data) +
2732 sizeof(struct LO_eckd_data) +
2733 sizeof(struct eckd_count) +
2734 rpt * nr_tracks * sizeof(struct eckd_count);
2735 break;
2736 case 0x04: /* Invalidate track. */
2737 case 0x0c: /* Invalidate track, use cdl. */
2738 cplength = 3;
2739 if (use_prefix)
2740 datasize = sizeof(struct PFX_eckd_data) +
2741 sizeof(struct LO_eckd_data) +
2742 sizeof(struct eckd_count);
2743 else
2744 datasize = sizeof(struct DE_eckd_data) +
2745 sizeof(struct LO_eckd_data) +
2746 sizeof(struct eckd_count);
2747 break;
2748 default:
2749 dev_warn(&startdev->cdev->dev,
2750 "An I/O control call used incorrect flags 0x%x\n",
2751 fdata->intensity);
2752 return ERR_PTR(-EINVAL);
2753 }
2754
2755 fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2756 if (IS_ERR(fcp))
2757 return fcp;
2758
2759 start_priv->count++;
2760 data = fcp->data;
2761 ccw = fcp->cpaddr;
2762
2763 switch (intensity & ~0x08) {
2764 case 0x00: /* Normal format. */
2765 if (use_prefix) {
2766 prefix(ccw++, (struct PFX_eckd_data *) data,
2767 fdata->start_unit, fdata->stop_unit,
2768 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2769 /* grant subsystem permission to format R0 */
2770 if (r0_perm)
2771 ((struct PFX_eckd_data *)data)
2772 ->define_extent.ga_extended |= 0x04;
2773 data += sizeof(struct PFX_eckd_data);
2774 } else {
2775 define_extent(ccw++, (struct DE_eckd_data *) data,
2776 fdata->start_unit, fdata->stop_unit,
2777 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2778 /* grant subsystem permission to format R0 */
2779 if (r0_perm)
2780 ((struct DE_eckd_data *) data)
2781 ->ga_extended |= 0x04;
2782 data += sizeof(struct DE_eckd_data);
2783 }
2784 ccw[-1].flags |= CCW_FLAG_CC;
2785 locate_record(ccw++, (struct LO_eckd_data *) data,
2786 fdata->start_unit, 0, rpt*nr_tracks,
2787 DASD_ECKD_CCW_WRITE_CKD, base,
2788 fdata->blksize);
2789 data += sizeof(struct LO_eckd_data);
2790 break;
2791 case 0x01: /* Write record zero + format track. */
2792 if (use_prefix) {
2793 prefix(ccw++, (struct PFX_eckd_data *) data,
2794 fdata->start_unit, fdata->stop_unit,
2795 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2796 base, startdev);
2797 data += sizeof(struct PFX_eckd_data);
2798 } else {
2799 define_extent(ccw++, (struct DE_eckd_data *) data,
2800 fdata->start_unit, fdata->stop_unit,
2801 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2802 data += sizeof(struct DE_eckd_data);
2803 }
2804 ccw[-1].flags |= CCW_FLAG_CC;
2805 locate_record(ccw++, (struct LO_eckd_data *) data,
2806 fdata->start_unit, 0, rpt * nr_tracks + 1,
2807 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2808 base->block->bp_block);
2809 data += sizeof(struct LO_eckd_data);
2810 break;
2811 case 0x04: /* Invalidate track. */
2812 if (use_prefix) {
2813 prefix(ccw++, (struct PFX_eckd_data *) data,
2814 fdata->start_unit, fdata->stop_unit,
2815 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2816 data += sizeof(struct PFX_eckd_data);
2817 } else {
2818 define_extent(ccw++, (struct DE_eckd_data *) data,
2819 fdata->start_unit, fdata->stop_unit,
2820 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2821 data += sizeof(struct DE_eckd_data);
2822 }
2823 ccw[-1].flags |= CCW_FLAG_CC;
2824 locate_record(ccw++, (struct LO_eckd_data *) data,
2825 fdata->start_unit, 0, 1,
2826 DASD_ECKD_CCW_WRITE_CKD, base, 8);
2827 data += sizeof(struct LO_eckd_data);
2828 break;
2829 }
2830
2831 for (j = 0; j < nr_tracks; j++) {
2832 /* calculate cylinder and head for the current track */
2833 set_ch_t(&address,
2834 (fdata->start_unit + j) /
2835 base_priv->rdc_data.trk_per_cyl,
2836 (fdata->start_unit + j) %
2837 base_priv->rdc_data.trk_per_cyl);
2838 if (intensity & 0x01) { /* write record zero */
2839 ect = (struct eckd_count *) data;
2840 data += sizeof(struct eckd_count);
2841 ect->cyl = address.cyl;
2842 ect->head = address.head;
2843 ect->record = 0;
2844 ect->kl = 0;
2845 ect->dl = 8;
2846 ccw[-1].flags |= CCW_FLAG_CC;
2847 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2848 ccw->flags = CCW_FLAG_SLI;
2849 ccw->count = 8;
2850 ccw->cda = virt_to_dma32(ect);
2851 ccw++;
2852 }
2853 if ((intensity & ~0x08) & 0x04) { /* erase track */
2854 ect = (struct eckd_count *) data;
2855 data += sizeof(struct eckd_count);
2856 ect->cyl = address.cyl;
2857 ect->head = address.head;
2858 ect->record = 1;
2859 ect->kl = 0;
2860 ect->dl = 0;
2861 ccw[-1].flags |= CCW_FLAG_CC;
2862 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2863 ccw->flags = CCW_FLAG_SLI;
2864 ccw->count = 8;
2865 ccw->cda = virt_to_dma32(ect);
2866 } else { /* write remaining records */
2867 for (i = 0; i < rpt; i++) {
2868 ect = (struct eckd_count *) data;
2869 data += sizeof(struct eckd_count);
2870 ect->cyl = address.cyl;
2871 ect->head = address.head;
2872 ect->record = i + 1;
2873 ect->kl = 0;
2874 ect->dl = fdata->blksize;
2875 /*
2876 * Check for special tracks 0-1
2877 * when formatting CDL
2878 */
2879 if ((intensity & 0x08) &&
2880 address.cyl == 0 && address.head == 0) {
2881 if (i < 3) {
2882 ect->kl = 4;
2883 ect->dl = sizes_trk0[i] - 4;
2884 }
2885 }
2886 if ((intensity & 0x08) &&
2887 address.cyl == 0 && address.head == 1) {
2888 ect->kl = 44;
2889 ect->dl = LABEL_SIZE - 44;
2890 }
2891 ccw[-1].flags |= CCW_FLAG_CC;
2892 if (i != 0 || j == 0)
2893 ccw->cmd_code =
2894 DASD_ECKD_CCW_WRITE_CKD;
2895 else
2896 ccw->cmd_code =
2897 DASD_ECKD_CCW_WRITE_CKD_MT;
2898 ccw->flags = CCW_FLAG_SLI;
2899 ccw->count = 8;
2900 ccw->cda = virt_to_dma32(ect);
2901 ccw++;
2902 }
2903 }
2904 }
2905
2906 fcp->startdev = startdev;
2907 fcp->memdev = startdev;
2908 fcp->basedev = base;
2909 fcp->retries = 256;
2910 fcp->expires = startdev->default_expires * HZ;
2911 fcp->buildclk = get_tod_clock();
2912 fcp->status = DASD_CQR_FILLED;
2913
2914 return fcp;
2915 }
2916
2917 /*
2918 * Wrapper function to build a format or format-check CCW request, depending on the input data
2919 */
2920 static struct dasd_ccw_req *
2921 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2922 struct format_data_t *fdata, int enable_pav,
2923 int tpm, struct eckd_count *fmt_buffer, int rpt)
2924 {
2925 struct dasd_ccw_req *ccw_req;
2926
2927 if (!fmt_buffer) {
2928 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2929 } else {
2930 if (tpm)
2931 ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2932 enable_pav,
2933 fmt_buffer, rpt);
2934 else
2935 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2936 fmt_buffer, rpt);
2937 }
2938
2939 return ccw_req;
2940 }
2941
2942 /*
2943 * Sanity checks on format_data
2944 */
2945 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2946 struct format_data_t *fdata)
2947 {
2948 struct dasd_eckd_private *private = base->private;
2949
2950 if (fdata->start_unit >=
2951 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2952 dev_warn(&base->cdev->dev,
2953 "Start track number %u used in formatting is too big\n",
2954 fdata->start_unit);
2955 return -EINVAL;
2956 }
2957 if (fdata->stop_unit >=
2958 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2959 dev_warn(&base->cdev->dev,
2960 "Stop track number %u used in formatting is too big\n",
2961 fdata->stop_unit);
2962 return -EINVAL;
2963 }
2964 if (fdata->start_unit > fdata->stop_unit) {
2965 dev_warn(&base->cdev->dev,
2966 "Start track %u used in formatting exceeds end track\n",
2967 fdata->start_unit);
2968 return -EINVAL;
2969 }
2970 if (dasd_check_blocksize(fdata->blksize) != 0) {
2971 dev_warn(&base->cdev->dev,
2972 "The DASD cannot be formatted with block size %u\n",
2973 fdata->blksize);
2974 return -EINVAL;
2975 }
2976 return 0;
2977 }
2978
2979 /*
2980 * This function will process format_data originally coming from an IOCTL
2981 */
2982 static int dasd_eckd_format_process_data(struct dasd_device *base,
2983 struct format_data_t *fdata,
2984 int enable_pav, int tpm,
2985 struct eckd_count *fmt_buffer, int rpt,
2986 struct irb *irb)
2987 {
2988 struct dasd_eckd_private *private = base->private;
2989 struct dasd_ccw_req *cqr, *n;
2990 struct list_head format_queue;
2991 struct dasd_device *device;
2992 char *sense = NULL;
2993 int old_start, old_stop, format_step;
2994 int step, retry;
2995 int rc;
2996
2997 rc = dasd_eckd_format_sanity_checks(base, fdata);
2998 if (rc)
2999 return rc;
3000
3001 INIT_LIST_HEAD(&format_queue);
3002
3003 old_start = fdata->start_unit;
3004 old_stop = fdata->stop_unit;
3005
3006 if (!tpm && fmt_buffer != NULL) {
3007 /* Command Mode / Format Check */
3008 format_step = 1;
3009 } else if (tpm && fmt_buffer != NULL) {
3010 /* Transport Mode / Format Check */
3011 format_step = DASD_CQR_MAX_CCW / rpt;
3012 } else {
3013 /* Normal Formatting */
3014 format_step = DASD_CQR_MAX_CCW /
3015 recs_per_track(&private->rdc_data, 0, fdata->blksize);
3016 }
3017
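/*
 * Split the track range into requests of at most format_step tracks each
 * and process them as one queue. If a request allocation fails with
 * -ENOMEM mid-way, the requests queued so far are processed first and
 * the remainder is retried afterwards.
 */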
3018 do {
3019 retry = 0;
3020 while (fdata->start_unit <= old_stop) {
3021 step = fdata->stop_unit - fdata->start_unit + 1;
3022 if (step > format_step) {
3023 fdata->stop_unit =
3024 fdata->start_unit + format_step - 1;
3025 }
3026
3027 cqr = dasd_eckd_format_build_ccw_req(base, fdata,
3028 enable_pav, tpm,
3029 fmt_buffer, rpt);
3030 if (IS_ERR(cqr)) {
3031 rc = PTR_ERR(cqr);
3032 if (rc == -ENOMEM) {
3033 if (list_empty(&format_queue))
3034 goto out;
3035 /*
3036 * not enough memory available: start the
3037 * already queued requests and retry the
3038 * remainder once they have finished
3039 */
3040 retry = 1;
3041 break;
3042 }
3043 goto out_err;
3044 }
3045 list_add_tail(&cqr->blocklist, &format_queue);
3046
3047 if (fmt_buffer) {
3048 step = fdata->stop_unit - fdata->start_unit + 1;
3049 fmt_buffer += rpt * step;
3050 }
3051 fdata->start_unit = fdata->stop_unit + 1;
3052 fdata->stop_unit = old_stop;
3053 }
3054
3055 rc = dasd_sleep_on_queue(&format_queue);
3056
3057 out_err:
3058 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
3059 device = cqr->startdev;
3060 private = device->private;
3061
3062 if (cqr->status == DASD_CQR_FAILED) {
3063 /*
3064 * Only get sense data if called by format
3065 * check
3066 */
3067 if (fmt_buffer && irb) {
3068 sense = dasd_get_sense(&cqr->irb);
3069 memcpy(irb, &cqr->irb, sizeof(*irb));
3070 }
3071 rc = -EIO;
3072 }
3073 list_del_init(&cqr->blocklist);
3074 dasd_ffree_request(cqr, device);
3075 private->count--;
3076 }
3077
3078 if (rc && rc != -EIO)
3079 goto out;
3080 if (rc == -EIO) {
3081 /*
3082 * In case fewer than the expected records are on the
3083 * track, we will most likely get a 'No Record Found'
3084 * error (in command mode) or a 'File Protected' error
3085 * (in transport mode). Those particular cases shouldn't
3086 * pass the -EIO to the IOCTL, therefore reset the rc
3087 * and continue.
3088 */
3089 if (sense &&
3090 (sense[1] & SNS1_NO_REC_FOUND ||
3091 sense[1] & SNS1_FILE_PROTECTED))
3092 retry = 1;
3093 else
3094 goto out;
3095 }
3096
3097 } while (retry);
3098
3099 out:
3100 fdata->start_unit = old_start;
3101 fdata->stop_unit = old_stop;
3102
3103 return rc;
3104 }
3105
3106 static int dasd_eckd_format_device(struct dasd_device *base,
3107 struct format_data_t *fdata, int enable_pav)
3108 {
3109 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
3110 0, NULL);
3111 }
3112
3113 static bool test_and_set_format_track(struct dasd_format_entry *to_format,
3114 struct dasd_ccw_req *cqr)
3115 {
3116 struct dasd_block *block = cqr->block;
3117 struct dasd_format_entry *format;
3118 unsigned long flags;
3119 bool rc = false;
3120
3121 spin_lock_irqsave(&block->format_lock, flags);
3122 if (cqr->trkcount != atomic_read(&block->trkcount)) {
3123 /*
3124 * The number of formatted tracks has changed after request
3125 * start and we cannot tell if the current track was involved.
3126 * To avoid data corruption, treat it as if the current track is
3127 * involved.
3128 */
3129 rc = true;
3130 goto out;
3131 }
3132 list_for_each_entry(format, &block->format_list, list) {
3133 if (format->track == to_format->track) {
3134 rc = true;
3135 goto out;
3136 }
3137 }
3138 list_add_tail(&to_format->list, &block->format_list);
3139
3140 out:
3141 spin_unlock_irqrestore(&block->format_lock, flags);
3142 return rc;
3143 }
3144
3145 static void clear_format_track(struct dasd_format_entry *format,
3146 struct dasd_block *block)
3147 {
3148 unsigned long flags;
3149
3150 spin_lock_irqsave(&block->format_lock, flags);
3151 atomic_inc(&block->trkcount);
3152 list_del_init(&format->list);
3153 spin_unlock_irqrestore(&block->format_lock, flags);
3154 }
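/*
 * block->trkcount acts as a generation counter for the format list: it is
 * presumably sampled into cqr->trkcount when the request is built, and it
 * is incremented in clear_format_track() above, so the comparison in
 * test_and_set_format_track() can detect that tracks were formatted while
 * the request was in flight.
 */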
3155
3156 /*
3157 * Callback function to free ESE format requests.
3158 */
3159 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3160 {
3161 struct dasd_device *device = cqr->startdev;
3162 struct dasd_eckd_private *private = device->private;
3163 struct dasd_format_entry *format = data;
3164
3165 clear_format_track(format, cqr->basedev->block);
3166 private->count--;
3167 dasd_ffree_request(cqr, device);
3168 }
3169
3170 static struct dasd_ccw_req *
3171 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3172 struct irb *irb)
3173 {
3174 struct dasd_eckd_private *private;
3175 struct dasd_format_entry *format;
3176 struct format_data_t fdata;
3177 unsigned int recs_per_trk;
3178 struct dasd_ccw_req *fcqr;
3179 struct dasd_device *base;
3180 struct dasd_block *block;
3181 unsigned int blksize;
3182 struct request *req;
3183 sector_t first_trk;
3184 sector_t last_trk;
3185 sector_t curr_trk;
3186 int rc;
3187
3188 req = dasd_get_callback_data(cqr);
3189 block = cqr->block;
3190 base = block->base;
3191 private = base->private;
3192 blksize = block->bp_block;
3193 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3194 format = &startdev->format_entry;
3195
3196 first_trk = blk_rq_pos(req) >> block->s2b_shift;
3197 sector_div(first_trk, recs_per_trk);
3198 last_trk =
3199 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3200 sector_div(last_trk, recs_per_trk);
3201 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3202 if (rc)
3203 return ERR_PTR(rc);
3204
3205 if (curr_trk < first_trk || curr_trk > last_trk) {
3206 DBF_DEV_EVENT(DBF_WARNING, startdev,
3207 "ESE error track %llu not within range %llu - %llu\n",
3208 curr_trk, first_trk, last_trk);
3209 return ERR_PTR(-EINVAL);
3210 }
3211 format->track = curr_trk;
3212 /* test if the track is already being formatted by another thread */
3213 if (test_and_set_format_track(format, cqr)) {
3214 /* this is no real error so do not count down retries */
3215 cqr->retries++;
3216 return ERR_PTR(-EEXIST);
3217 }
3218
3219 fdata.start_unit = curr_trk;
3220 fdata.stop_unit = curr_trk;
3221 fdata.blksize = blksize;
3222 fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3223
3224 rc = dasd_eckd_format_sanity_checks(base, &fdata);
3225 if (rc)
3226 return ERR_PTR(-EINVAL);
3227
3228 /*
3229 * We're building the request with PAV disabled as we're reusing
3230 * the former startdev.
3231 */
3232 fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3233 if (IS_ERR(fcqr))
3234 return fcqr;
3235
3236 fcqr->callback = dasd_eckd_ese_format_cb;
3237 fcqr->callback_data = (void *) format;
3238
3239 return fcqr;
3240 }
3241
3242 /*
3243 * When data is read from an unformatted area of an ESE volume, this function
3244 * returns zeroed data and thereby mimics a read of zero data.
3245 *
3246 * The first unformatted track is the one that got the NRF error, the address is
3247 * encoded in the sense data.
3248 *
3249 * All tracks before have returned valid data and should not be touched.
3250 * All tracks after the unformatted track might be formatted or not. This is
3251 * currently not known, remember the processed data and return the remainder of
3252 * the request to the blocklayer in __dasd_cleanup_cqr().
3253 */
3254 static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3255 {
3256 struct dasd_eckd_private *private;
3257 sector_t first_trk, last_trk;
3258 sector_t first_blk, last_blk;
3259 unsigned int blksize, off;
3260 unsigned int recs_per_trk;
3261 struct dasd_device *base;
3262 struct req_iterator iter;
3263 struct dasd_block *block;
3264 unsigned int skip_block;
3265 unsigned int blk_count;
3266 struct request *req;
3267 struct bio_vec bv;
3268 sector_t curr_trk;
3269 sector_t end_blk;
3270 char *dst;
3271 int rc;
3272
3273 req = (struct request *) cqr->callback_data;
3274 base = cqr->block->base;
3275 blksize = base->block->bp_block;
3276 block = cqr->block;
3277 private = base->private;
3278 skip_block = 0;
3279 blk_count = 0;
3280
3281 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3282 first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
3283 sector_div(first_trk, recs_per_trk);
3284 last_trk = last_blk =
3285 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3286 sector_div(last_trk, recs_per_trk);
3287 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3288 if (rc)
3289 return rc;
3290
3291 /* sanity check if the current track from sense data is valid */
3292 if (curr_trk < first_trk || curr_trk > last_trk) {
3293 DBF_DEV_EVENT(DBF_WARNING, base,
3294 "ESE error track %llu not within range %llu - %llu\n",
3295 curr_trk, first_trk, last_trk);
3296 return -EINVAL;
3297 }
3298
3299 /*
3300 * if the NRF error did not occur on the first track, we have to skip
3301 * over the valid blocks that precede it
3302 */
3303 if (curr_trk != first_trk)
3304 skip_block = curr_trk * recs_per_trk - first_blk;
3305
3306 /* we have no information beyond the current track */
3307 end_blk = (curr_trk + 1) * recs_per_trk;
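/*
 * Worked example with hypothetical numbers: for recs_per_trk = 12,
 * first_blk = 10 and curr_trk = 1, skip_block = 1 * 12 - 10 = 2, so two
 * already valid blocks are left untouched, and end_blk = 24, so zeroing
 * stops at the end of the track that raised the NRF error.
 */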
3308
3309 rq_for_each_segment(bv, req, iter) {
3310 dst = bvec_virt(&bv);
3311 for (off = 0; off < bv.bv_len; off += blksize) {
3312 if (first_blk + blk_count >= end_blk) {
3313 cqr->proc_bytes = blk_count * blksize;
3314 return 0;
3315 }
3316 if (dst && !skip_block)
3317 memset(dst, 0, blksize);
3318 else
3319 skip_block--;
3320 dst += blksize;
3321 blk_count++;
3322 }
3323 }
3324 return 0;
3325 }
3326
3327 /*
3328 * Helper function to count consecutive records of a single track.
3329 */
3330 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3331 int max)
3332 {
3333 int head;
3334 int i;
3335
3336 head = fmt_buffer[start].head;
3337
3338 /*
3339 * There are 3 conditions where we stop counting:
3340 * - if data reoccurs (same head and record may reoccur), which may
3341 * happen due to the way DASD_ECKD_CCW_READ_COUNT works
3342 * - when the head changes, because we're iterating over several tracks
3343 * then (DASD_ECKD_CCW_READ_COUNT_MT)
3344 * - when we've reached the end of sensible data in the buffer (the
3345 * record will be 0 then)
3346 */
3347 for (i = start; i < max; i++) {
3348 if (i > start) {
3349 if ((fmt_buffer[i].head == head &&
3350 fmt_buffer[i].record == 1) ||
3351 fmt_buffer[i].head != head ||
3352 fmt_buffer[i].record == 0)
3353 break;
3354 }
3355 }
3356
3357 return i - start;
3358 }
3359
3360 /*
3361 * Evaluate a given range of tracks. Data like number of records, blocksize,
3362 * record ids, and key length are compared with expected data.
3363 *
3364 * If a mismatch occurs, the corresponding error bit is set, as well as
3365 * additional information, depending on the error.
3366 */
3367 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
3368 struct format_check_t *cdata,
3369 int rpt_max, int rpt_exp,
3370 int trk_per_cyl, int tpm)
3371 {
3372 struct ch_t geo;
3373 int max_entries;
3374 int count = 0;
3375 int trkcount;
3376 int blksize;
3377 int pos = 0;
3378 int i, j;
3379 int kl;
3380
3381 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3382 max_entries = trkcount * rpt_max;
3383
3384 for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
3385 /* Calculate the correct next starting position in the buffer */
3386 if (tpm) {
3387 while (fmt_buffer[pos].record == 0 &&
3388 fmt_buffer[pos].dl == 0) {
3389 if (pos++ > max_entries)
3390 break;
3391 }
3392 } else {
3393 if (i != cdata->expect.start_unit)
3394 pos += rpt_max - count;
3395 }
3396
3397 /* Calculate the expected geo values for the current track */
3398 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
3399
3400 /* Count and check number of records */
3401 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
3402
3403 if (count < rpt_exp) {
3404 cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
3405 break;
3406 }
3407 if (count > rpt_exp) {
3408 cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
3409 break;
3410 }
3411
3412 for (j = 0; j < count; j++, pos++) {
3413 blksize = cdata->expect.blksize;
3414 kl = 0;
3415
3416 /*
3417 * Set special values when checking CDL formatted
3418 * devices.
3419 */
3420 if ((cdata->expect.intensity & 0x08) &&
3421 geo.cyl == 0 && geo.head == 0) {
3422 if (j < 3) {
3423 blksize = sizes_trk0[j] - 4;
3424 kl = 4;
3425 }
3426 }
3427 if ((cdata->expect.intensity & 0x08) &&
3428 geo.cyl == 0 && geo.head == 1) {
3429 blksize = LABEL_SIZE - 44;
3430 kl = 44;
3431 }
3432
3433 /* Check blocksize */
3434 if (fmt_buffer[pos].dl != blksize) {
3435 cdata->result = DASD_FMT_ERR_BLKSIZE;
3436 goto out;
3437 }
3438 /* Check if key length is 0 */
3439 if (fmt_buffer[pos].kl != kl) {
3440 cdata->result = DASD_FMT_ERR_KEY_LENGTH;
3441 goto out;
3442 }
3443 /* Check if record_id is correct */
3444 if (fmt_buffer[pos].cyl != geo.cyl ||
3445 fmt_buffer[pos].head != geo.head ||
3446 fmt_buffer[pos].record != (j + 1)) {
3447 cdata->result = DASD_FMT_ERR_RECORD_ID;
3448 goto out;
3449 }
3450 }
3451 }
3452
3453 out:
3454 /*
3455 * In case of no errors, we need to decrease by one
3456 * to get the correct positions.
3457 */
3458 if (!cdata->result) {
3459 i--;
3460 pos--;
3461 }
3462
3463 cdata->unit = i;
3464 cdata->num_records = count;
3465 cdata->rec = fmt_buffer[pos].record;
3466 cdata->blksize = fmt_buffer[pos].dl;
3467 cdata->key_length = fmt_buffer[pos].kl;
3468 }
3469
3470 /*
3471 * Check the format of a range of tracks of a DASD.
3472 */
3473 static int dasd_eckd_check_device_format(struct dasd_device *base,
3474 struct format_check_t *cdata,
3475 int enable_pav)
3476 {
3477 struct dasd_eckd_private *private = base->private;
3478 struct eckd_count *fmt_buffer;
3479 struct irb irb;
3480 int rpt_max, rpt_exp;
3481 int fmt_buffer_size;
3482 int trk_per_cyl;
3483 int trkcount;
3484 int tpm = 0;
3485 int rc;
3486
3487 trk_per_cyl = private->rdc_data.trk_per_cyl;
3488
3489 /* Get maximum and expected amount of records per track */
3490 rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3491 rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3492
3493 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3494 fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3495
3496 fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3497 if (!fmt_buffer)
3498 return -ENOMEM;
3499
3500 /*
3501 * A certain FICON feature subset is needed to operate in transport
3502 * mode. Additionally, the support for transport mode is implicitly
3503 * checked by comparing the buffer size with fcx_max_data. As long as
3504 * the buffer size is smaller we can operate in transport mode and
3505 * process multiple tracks. If not, only one track at once is being
3506 * processed using command mode.
3507 */
3508 if ((private->features.feature[40] & 0x04) &&
3509 fmt_buffer_size <= private->fcx_max_data)
3510 tpm = 1;
3511
3512 rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3513 tpm, fmt_buffer, rpt_max, &irb);
3514 if (rc && rc != -EIO)
3515 goto out;
3516 if (rc == -EIO) {
3517 /*
3518 * If our first attempt with transport mode enabled comes back
3519 * with an incorrect length error, we're going to retry the
3520 * check with command mode.
3521 */
3522 if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3523 tpm = 0;
3524 rc = dasd_eckd_format_process_data(base, &cdata->expect,
3525 enable_pav, tpm,
3526 fmt_buffer, rpt_max,
3527 &irb);
3528 if (rc)
3529 goto out;
3530 } else {
3531 goto out;
3532 }
3533 }
3534
3535 dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3536 trk_per_cyl, tpm);
3537
3538 out:
3539 kfree(fmt_buffer);
3540
3541 return rc;
3542 }
3543
3544 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3545 {
3546 if (cqr->retries < 0) {
3547 cqr->status = DASD_CQR_FAILED;
3548 return;
3549 }
3550 cqr->status = DASD_CQR_FILLED;
3551 if (cqr->block && (cqr->startdev != cqr->block->base)) {
3552 dasd_eckd_reset_ccw_to_base_io(cqr);
3553 cqr->startdev = cqr->block->base;
3554 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3555 }
3556 };
3557
3558 static dasd_erp_fn_t
3559 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3560 {
3561 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3562 struct ccw_device *cdev = device->cdev;
3563
3564 switch (cdev->id.cu_type) {
3565 case 0x3990:
3566 case 0x2105:
3567 case 0x2107:
3568 case 0x1750:
3569 return dasd_3990_erp_action;
3570 case 0x9343:
3571 case 0x3880:
3572 default:
3573 return dasd_default_erp_action;
3574 }
3575 }
3576
3577 static dasd_erp_fn_t
3578 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3579 {
3580 return dasd_default_erp_postaction;
3581 }
3582
3583 static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3584 struct dasd_ccw_req *cqr,
3585 struct irb *irb)
3586 {
3587 char mask;
3588 char *sense = NULL;
3589 struct dasd_eckd_private *private = device->private;
3590
3591 /* first of all check for state change pending interrupt */
3592 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3593 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3594 /*
3595 * for alias only, not in offline processing
3596 * and only if not suspended
3597 */
3598 if (!device->block && private->lcu &&
3599 device->state == DASD_STATE_ONLINE &&
3600 !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3601 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3602 /* schedule worker to reload device */
3603 dasd_reload_device(device);
3604 }
3605 dasd_generic_handle_state_change(device);
3606 return;
3607 }
3608
3609 sense = dasd_get_sense(irb);
3610 if (!sense)
3611 return;
3612
3613 /* summary unit check */
3614 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3615 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3616 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3617 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3618 "eckd suc: device already notified");
3619 return;
3620 }
3621 sense = dasd_get_sense(irb);
3622 if (!sense) {
3623 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3624 "eckd suc: no reason code available");
3625 clear_bit(DASD_FLAG_SUC, &device->flags);
3626 return;
3627
3628 }
3629 private->suc_reason = sense[8];
3630 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3631 "eckd handle summary unit check: reason",
3632 private->suc_reason);
3633 dasd_get_device(device);
3634 if (!schedule_work(&device->suc_work))
3635 dasd_put_device(device);
3636
3637 return;
3638 }
3639
3640 /* service information message SIM */
3641 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3642 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3643 dasd_3990_erp_handle_sim(device, sense);
3644 return;
3645 }
3646
3647 /* loss of device reservation is handled via base devices only
3648 * as alias devices may be used with several bases
3649 */
3650 if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3651 (sense[7] == 0x3F) &&
3652 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3653 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3654 if (device->features & DASD_FEATURE_FAILONSLCK)
3655 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3656 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3657 dev_err(&device->cdev->dev,
3658 "The device reservation was lost\n");
3659 }
3660 }
3661
dasd_eckd_ras_sanity_checks(struct dasd_device * device,unsigned int first_trk,unsigned int last_trk)3662 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3663 unsigned int first_trk,
3664 unsigned int last_trk)
3665 {
3666 struct dasd_eckd_private *private = device->private;
3667 unsigned int trks_per_vol;
3668 int rc = 0;
3669
3670 trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3671
3672 if (first_trk >= trks_per_vol) {
3673 dev_warn(&device->cdev->dev,
3674 "Start track number %u used in the space release command is too big\n",
3675 first_trk);
3676 rc = -EINVAL;
3677 } else if (last_trk >= trks_per_vol) {
3678 dev_warn(&device->cdev->dev,
3679 "Stop track number %u used in the space release command is too big\n",
3680 last_trk);
3681 rc = -EINVAL;
3682 } else if (first_trk > last_trk) {
3683 dev_warn(&device->cdev->dev,
3684 "Start track %u used in the space release command exceeds the end track\n",
3685 first_trk);
3686 rc = -EINVAL;
3687 }
3688 return rc;
3689 }
3690
3691 /*
3692 * Helper function to count the amount of involved extents within a given range
3693 * with extent alignment in mind.
3694 */
count_exts(unsigned int from,unsigned int to,int trks_per_ext)3695 static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
3696 {
3697 int cur_pos = 0;
3698 int count = 0;
3699 int tmp;
3700
3701 if (from == to)
3702 return 1;
3703
3704 /* Count first partial extent */
3705 if (from % trks_per_ext != 0) {
3706 tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
3707 if (tmp > to)
3708 tmp = to;
3709 cur_pos = tmp - from + 1;
3710 count++;
3711 }
3712 /* Count full extents */
3713 if (to - (from + cur_pos) + 1 >= trks_per_ext) {
3714 tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
3715 count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
3716 cur_pos = tmp;
3717 }
3718 /* Count last partial extent */
3719 if (cur_pos < to)
3720 count++;
3721
3722 return count;
3723 }
3724
dasd_in_copy_relation(struct dasd_device * device)3725 static int dasd_in_copy_relation(struct dasd_device *device)
3726 {
3727 struct dasd_pprc_data_sc4 *temp;
3728 int rc;
3729
3730 if (!dasd_eckd_pprc_enabled(device))
3731 return 0;
3732
3733 temp = kzalloc(sizeof(*temp), GFP_KERNEL);
3734 if (!temp)
3735 return -ENOMEM;
3736
3737 rc = dasd_eckd_query_pprc_status(device, temp);
3738 if (!rc)
3739 rc = temp->dev_info[0].state;
3740
3741 kfree(temp);
3742 return rc;
3743 }
3744
3745 /*
3746 * Release allocated space for a given range or an entire volume.
3747 */
3748 static struct dasd_ccw_req *
dasd_eckd_dso_ras(struct dasd_device * device,struct dasd_block * block,struct request * req,unsigned int first_trk,unsigned int last_trk,int by_extent)3749 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3750 struct request *req, unsigned int first_trk,
3751 unsigned int last_trk, int by_extent)
3752 {
3753 struct dasd_eckd_private *private = device->private;
3754 struct dasd_dso_ras_ext_range *ras_range;
3755 struct dasd_rssd_features *features;
3756 struct dasd_dso_ras_data *ras_data;
3757 u16 heads, beg_head, end_head;
3758 int cur_to_trk, cur_from_trk;
3759 struct dasd_ccw_req *cqr;
3760 u32 beg_cyl, end_cyl;
3761 int copy_relation;
3762 struct ccw1 *ccw;
3763 int trks_per_ext;
3764 size_t ras_size;
3765 size_t size;
3766 int nr_exts;
3767 void *rq;
3768 int i;
3769
3770 if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
3771 return ERR_PTR(-EINVAL);
3772
3773 copy_relation = dasd_in_copy_relation(device);
3774 if (copy_relation < 0)
3775 return ERR_PTR(copy_relation);
3776
3777 rq = req ? blk_mq_rq_to_pdu(req) : NULL;
3778
3779 features = &private->features;
3780
3781 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3782 nr_exts = 0;
3783 if (by_extent)
3784 nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
3785 ras_size = sizeof(*ras_data);
3786 size = ras_size + (nr_exts * sizeof(*ras_range));
3787
3788 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3789 if (IS_ERR(cqr)) {
3790 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3791 "Could not allocate RAS request");
3792 return cqr;
3793 }
3794
3795 ras_data = cqr->data;
3796 memset(ras_data, 0, size);
3797
3798 ras_data->order = DSO_ORDER_RAS;
3799 ras_data->flags.vol_type = 0; /* CKD volume */
3800 /* Release specified extents or entire volume */
3801 ras_data->op_flags.by_extent = by_extent;
3802 /*
3803 * This bit guarantees initialisation of tracks within an extent that is
3804 * not fully specified, but is only supported with a certain feature
3805 * subset and for devices not in a copy relation.
3806 */
3807 if (features->feature[56] & 0x01 && !copy_relation)
3808 ras_data->op_flags.guarantee_init = 1;
3809
3810 ras_data->lss = private->conf.ned->ID;
3811 ras_data->dev_addr = private->conf.ned->unit_addr;
3812 ras_data->nr_exts = nr_exts;
3813
3814 if (by_extent) {
3815 heads = private->rdc_data.trk_per_cyl;
3816 cur_from_trk = first_trk;
3817 cur_to_trk = first_trk + trks_per_ext -
3818 (first_trk % trks_per_ext) - 1;
3819 if (cur_to_trk > last_trk)
3820 cur_to_trk = last_trk;
3821 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3822
3823 for (i = 0; i < nr_exts; i++) {
3824 beg_cyl = cur_from_trk / heads;
3825 beg_head = cur_from_trk % heads;
3826 end_cyl = cur_to_trk / heads;
3827 end_head = cur_to_trk % heads;
3828
3829 set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
3830 set_ch_t(&ras_range->end_ext, end_cyl, end_head);
3831
3832 cur_from_trk = cur_to_trk + 1;
3833 cur_to_trk = cur_from_trk + trks_per_ext - 1;
3834 if (cur_to_trk > last_trk)
3835 cur_to_trk = last_trk;
3836 ras_range++;
3837 }
3838 }
3839
3840 ccw = cqr->cpaddr;
3841 ccw->cda = virt_to_dma32(cqr->data);
3842 ccw->cmd_code = DASD_ECKD_CCW_DSO;
3843 ccw->count = size;
3844
3845 cqr->startdev = device;
3846 cqr->memdev = device;
3847 cqr->block = block;
3848 cqr->retries = 256;
3849 cqr->expires = device->default_expires * HZ;
3850 cqr->buildclk = get_tod_clock();
3851 cqr->status = DASD_CQR_FILLED;
3852
3853 return cqr;
3854 }
3855
dasd_eckd_release_space_full(struct dasd_device * device)3856 static int dasd_eckd_release_space_full(struct dasd_device *device)
3857 {
3858 struct dasd_ccw_req *cqr;
3859 int rc;
3860
3861 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3862 if (IS_ERR(cqr))
3863 return PTR_ERR(cqr);
3864
3865 rc = dasd_sleep_on_interruptible(cqr);
3866
3867 dasd_sfree_request(cqr, cqr->memdev);
3868
3869 return rc;
3870 }
3871
dasd_eckd_release_space_trks(struct dasd_device * device,unsigned int from,unsigned int to)3872 static int dasd_eckd_release_space_trks(struct dasd_device *device,
3873 unsigned int from, unsigned int to)
3874 {
3875 struct dasd_eckd_private *private = device->private;
3876 struct dasd_block *block = device->block;
3877 struct dasd_ccw_req *cqr, *n;
3878 struct list_head ras_queue;
3879 unsigned int device_exts;
3880 int trks_per_ext;
3881 int stop, step;
3882 int cur_pos;
3883 int rc = 0;
3884 int retry;
3885
3886 INIT_LIST_HEAD(&ras_queue);
3887
3888 device_exts = private->real_cyl / dasd_eckd_ext_size(device);
3889 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3890
3891 /* Make sure device limits are not exceeded */
3892 step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
3893 cur_pos = from;
3894
3895 do {
3896 retry = 0;
3897 while (cur_pos < to) {
3898 stop = cur_pos + step -
3899 ((cur_pos + step) % trks_per_ext) - 1;
3900 if (stop > to)
3901 stop = to;
3902
3903 cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3904 if (IS_ERR(cqr)) {
3905 rc = PTR_ERR(cqr);
3906 if (rc == -ENOMEM) {
3907 if (list_empty(&ras_queue))
3908 goto out;
3909 retry = 1;
3910 break;
3911 }
3912 goto err_out;
3913 }
3914
3915 spin_lock_irq(&block->queue_lock);
3916 list_add_tail(&cqr->blocklist, &ras_queue);
3917 spin_unlock_irq(&block->queue_lock);
3918 cur_pos = stop + 1;
3919 }
3920
3921 rc = dasd_sleep_on_queue_interruptible(&ras_queue);
3922
3923 err_out:
3924 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3925 device = cqr->startdev;
3926 private = device->private;
3927
3928 spin_lock_irq(&block->queue_lock);
3929 list_del_init(&cqr->blocklist);
3930 spin_unlock_irq(&block->queue_lock);
3931 dasd_sfree_request(cqr, device);
3932 private->count--;
3933 }
3934 } while (retry);
3935
3936 out:
3937 return rc;
3938 }
3939
dasd_eckd_release_space(struct dasd_device * device,struct format_data_t * rdata)3940 static int dasd_eckd_release_space(struct dasd_device *device,
3941 struct format_data_t *rdata)
3942 {
3943 if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3944 return dasd_eckd_release_space_full(device);
3945 else if (rdata->intensity == 0)
3946 return dasd_eckd_release_space_trks(device, rdata->start_unit,
3947 rdata->stop_unit);
3948 else
3949 return -EINVAL;
3950 }
3951
dasd_eckd_build_cp_cmd_single(struct dasd_device * startdev,struct dasd_block * block,struct request * req,sector_t first_rec,sector_t last_rec,sector_t first_trk,sector_t last_trk,unsigned int first_offs,unsigned int last_offs,unsigned int blk_per_trk,unsigned int blksize)3952 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3953 struct dasd_device *startdev,
3954 struct dasd_block *block,
3955 struct request *req,
3956 sector_t first_rec,
3957 sector_t last_rec,
3958 sector_t first_trk,
3959 sector_t last_trk,
3960 unsigned int first_offs,
3961 unsigned int last_offs,
3962 unsigned int blk_per_trk,
3963 unsigned int blksize)
3964 {
3965 struct dasd_eckd_private *private;
3966 dma64_t *idaws;
3967 struct LO_eckd_data *LO_data;
3968 struct dasd_ccw_req *cqr;
3969 struct ccw1 *ccw;
3970 struct req_iterator iter;
3971 struct bio_vec bv;
3972 char *dst;
3973 unsigned int off;
3974 int count, cidaw, cplength, datasize;
3975 sector_t recid;
3976 unsigned char cmd, rcmd;
3977 int use_prefix;
3978 struct dasd_device *basedev;
3979
3980 basedev = block->base;
3981 private = basedev->private;
3982 if (rq_data_dir(req) == READ)
3983 cmd = DASD_ECKD_CCW_READ_MT;
3984 else if (rq_data_dir(req) == WRITE)
3985 cmd = DASD_ECKD_CCW_WRITE_MT;
3986 else
3987 return ERR_PTR(-EINVAL);
3988
3989 /* Check struct bio and count the number of blocks for the request. */
3990 count = 0;
3991 cidaw = 0;
3992 rq_for_each_segment(bv, req, iter) {
3993 if (bv.bv_len & (blksize - 1))
3994 /* Eckd can only do full blocks. */
3995 return ERR_PTR(-EINVAL);
3996 count += bv.bv_len >> (block->s2b_shift + 9);
3997 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
3998 cidaw += bv.bv_len >> (block->s2b_shift + 9);
3999 }
4000 /* Paranoia. */
4001 if (count != last_rec - first_rec + 1)
4002 return ERR_PTR(-EINVAL);
4003
4004 /* use the prefix command if available */
4005 use_prefix = private->features.feature[8] & 0x01;
4006 if (use_prefix) {
4007 /* 1x prefix + number of blocks */
4008 cplength = 2 + count;
4009 /* 1x prefix + cidaws*sizeof(long) */
4010 datasize = sizeof(struct PFX_eckd_data) +
4011 sizeof(struct LO_eckd_data) +
4012 cidaw * sizeof(unsigned long);
4013 } else {
4014 /* 1x define extent + 1x locate record + number of blocks */
4015 cplength = 2 + count;
4016 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
4017 datasize = sizeof(struct DE_eckd_data) +
4018 sizeof(struct LO_eckd_data) +
4019 cidaw * sizeof(unsigned long);
4020 }
4021 /* Find out the number of additional locate record ccws for cdl. */
4022 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
4023 if (last_rec >= 2*blk_per_trk)
4024 count = 2*blk_per_trk - first_rec;
4025 cplength += count;
4026 datasize += count*sizeof(struct LO_eckd_data);
4027 }
4028 /* Allocate the ccw request. */
4029 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4030 startdev, blk_mq_rq_to_pdu(req));
4031 if (IS_ERR(cqr))
4032 return cqr;
4033 ccw = cqr->cpaddr;
4034 /* First ccw is define extent or prefix. */
4035 if (use_prefix) {
4036 if (prefix(ccw++, cqr->data, first_trk,
4037 last_trk, cmd, basedev, startdev) == -EAGAIN) {
4038 /* Clock not in sync and XRC is enabled.
4039 * Try again later.
4040 */
4041 dasd_sfree_request(cqr, startdev);
4042 return ERR_PTR(-EAGAIN);
4043 }
4044 idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
4045 } else {
4046 if (define_extent(ccw++, cqr->data, first_trk,
4047 last_trk, cmd, basedev, 0) == -EAGAIN) {
4048 /* Clock not in sync and XRC is enabled.
4049 * Try again later.
4050 */
4051 dasd_sfree_request(cqr, startdev);
4052 return ERR_PTR(-EAGAIN);
4053 }
4054 idaws = (dma64_t *)(cqr->data + sizeof(struct DE_eckd_data));
4055 }
4056 /* Build locate_record+read/write/ccws. */
4057 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
4058 recid = first_rec;
4059 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
4060 /* Only standard blocks so there is just one locate record. */
4061 ccw[-1].flags |= CCW_FLAG_CC;
4062 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
4063 last_rec - recid + 1, cmd, basedev, blksize);
4064 }
4065 rq_for_each_segment(bv, req, iter) {
4066 dst = bvec_virt(&bv);
4067 if (dasd_page_cache) {
4068 char *copy = kmem_cache_alloc(dasd_page_cache,
4069 GFP_DMA | __GFP_NOWARN);
4070 if (copy && rq_data_dir(req) == WRITE)
4071 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
4072 if (copy)
4073 dst = copy + bv.bv_offset;
4074 }
4075 for (off = 0; off < bv.bv_len; off += blksize) {
4076 sector_t trkid = recid;
4077 unsigned int recoffs = sector_div(trkid, blk_per_trk);
4078 rcmd = cmd;
4079 count = blksize;
4080 /* Locate record for cdl special block ? */
4081 if (private->uses_cdl && recid < 2*blk_per_trk) {
4082 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
4083 rcmd |= 0x8;
4084 count = dasd_eckd_cdl_reclen(recid);
4085 if (count < blksize &&
4086 rq_data_dir(req) == READ)
4087 memset(dst + count, 0xe5,
4088 blksize - count);
4089 }
4090 ccw[-1].flags |= CCW_FLAG_CC;
4091 locate_record(ccw++, LO_data++,
4092 trkid, recoffs + 1,
4093 1, rcmd, basedev, count);
4094 }
4095 /* Locate record for standard blocks ? */
4096 if (private->uses_cdl && recid == 2*blk_per_trk) {
4097 ccw[-1].flags |= CCW_FLAG_CC;
4098 locate_record(ccw++, LO_data++,
4099 trkid, recoffs + 1,
4100 last_rec - recid + 1,
4101 cmd, basedev, count);
4102 }
4103 /* Read/write ccw. */
4104 ccw[-1].flags |= CCW_FLAG_CC;
4105 ccw->cmd_code = rcmd;
4106 ccw->count = count;
4107 if (idal_is_needed(dst, blksize)) {
4108 ccw->cda = virt_to_dma32(idaws);
4109 ccw->flags = CCW_FLAG_IDA;
4110 idaws = idal_create_words(idaws, dst, blksize);
4111 } else {
4112 ccw->cda = virt_to_dma32(dst);
4113 ccw->flags = 0;
4114 }
4115 ccw++;
4116 dst += blksize;
4117 recid++;
4118 }
4119 }
4120 if (blk_noretry_request(req) ||
4121 block->base->features & DASD_FEATURE_FAILFAST)
4122 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4123 cqr->startdev = startdev;
4124 cqr->memdev = startdev;
4125 cqr->block = block;
4126 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4127 cqr->lpm = dasd_path_get_ppm(startdev);
4128 cqr->retries = startdev->default_retries;
4129 cqr->buildclk = get_tod_clock();
4130 cqr->status = DASD_CQR_FILLED;
4131
4132 /* Set flags to suppress output for expected errors */
4133 if (dasd_eckd_is_ese(basedev)) {
4134 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4135 }
4136
4137 return cqr;
4138 }
4139
dasd_eckd_build_cp_cmd_track(struct dasd_device * startdev,struct dasd_block * block,struct request * req,sector_t first_rec,sector_t last_rec,sector_t first_trk,sector_t last_trk,unsigned int first_offs,unsigned int last_offs,unsigned int blk_per_trk,unsigned int blksize)4140 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
4141 struct dasd_device *startdev,
4142 struct dasd_block *block,
4143 struct request *req,
4144 sector_t first_rec,
4145 sector_t last_rec,
4146 sector_t first_trk,
4147 sector_t last_trk,
4148 unsigned int first_offs,
4149 unsigned int last_offs,
4150 unsigned int blk_per_trk,
4151 unsigned int blksize)
4152 {
4153 dma64_t *idaws;
4154 struct dasd_ccw_req *cqr;
4155 struct ccw1 *ccw;
4156 struct req_iterator iter;
4157 struct bio_vec bv;
4158 char *dst, *idaw_dst;
4159 unsigned int cidaw, cplength, datasize;
4160 unsigned int tlf;
4161 sector_t recid;
4162 unsigned char cmd;
4163 struct dasd_device *basedev;
4164 unsigned int trkcount, count, count_to_trk_end;
4165 unsigned int idaw_len, seg_len, part_len, len_to_track_end;
4166 unsigned char new_track, end_idaw;
4167 sector_t trkid;
4168 unsigned int recoffs;
4169
4170 basedev = block->base;
4171 if (rq_data_dir(req) == READ)
4172 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4173 else if (rq_data_dir(req) == WRITE)
4174 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4175 else
4176 return ERR_PTR(-EINVAL);
4177
4178 /* Track based I/O needs IDAWs for each page, and not just for
4179 * 64 bit addresses. We need additional idals for pages
4180 * that get filled from two tracks, so we use the number
4181 * of records as upper limit.
4182 */
4183 cidaw = last_rec - first_rec + 1;
4184 trkcount = last_trk - first_trk + 1;
4185
4186 /* 1x prefix + one read/write ccw per track */
4187 cplength = 1 + trkcount;
4188
4189 datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
4190
4191 /* Allocate the ccw request. */
4192 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4193 startdev, blk_mq_rq_to_pdu(req));
4194 if (IS_ERR(cqr))
4195 return cqr;
4196 ccw = cqr->cpaddr;
4197 /* transfer length factor: how many bytes to read from the last track */
4198 if (first_trk == last_trk)
4199 tlf = last_offs - first_offs + 1;
4200 else
4201 tlf = last_offs + 1;
4202 tlf *= blksize;
4203
4204 if (prefix_LRE(ccw++, cqr->data, first_trk,
4205 last_trk, cmd, basedev, startdev,
4206 1 /* format */, first_offs + 1,
4207 trkcount, blksize,
4208 tlf) == -EAGAIN) {
4209 /* Clock not in sync and XRC is enabled.
4210 * Try again later.
4211 */
4212 dasd_sfree_request(cqr, startdev);
4213 return ERR_PTR(-EAGAIN);
4214 }
4215
4216 /*
4217 * The translation of request into ccw programs must meet the
4218 * following conditions:
4219 * - all idaws but the first and the last must address full pages
4220 * (or 2K blocks on 31-bit)
4221 * - the scope of a ccw and it's idal ends with the track boundaries
4222 */
4223 idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
4224 recid = first_rec;
4225 new_track = 1;
4226 end_idaw = 0;
4227 len_to_track_end = 0;
4228 idaw_dst = NULL;
4229 idaw_len = 0;
4230 rq_for_each_segment(bv, req, iter) {
4231 dst = bvec_virt(&bv);
4232 seg_len = bv.bv_len;
4233 while (seg_len) {
4234 if (new_track) {
4235 trkid = recid;
4236 recoffs = sector_div(trkid, blk_per_trk);
4237 count_to_trk_end = blk_per_trk - recoffs;
4238 count = min((last_rec - recid + 1),
4239 (sector_t)count_to_trk_end);
4240 len_to_track_end = count * blksize;
4241 ccw[-1].flags |= CCW_FLAG_CC;
4242 ccw->cmd_code = cmd;
4243 ccw->count = len_to_track_end;
4244 ccw->cda = virt_to_dma32(idaws);
4245 ccw->flags = CCW_FLAG_IDA;
4246 ccw++;
4247 recid += count;
4248 new_track = 0;
4249 /* first idaw for a ccw may start anywhere */
4250 if (!idaw_dst)
4251 idaw_dst = dst;
4252 }
4253 /* If we start a new idaw, we must make sure that it
4254 * starts on an IDA_BLOCK_SIZE boundary.
4255 * If we continue an idaw, we must make sure that the
4256 * current segment begins where the so far accumulated
4257 * idaw ends
4258 */
4259 if (!idaw_dst) {
4260 if ((unsigned long)(dst) & (IDA_BLOCK_SIZE - 1)) {
4261 dasd_sfree_request(cqr, startdev);
4262 return ERR_PTR(-ERANGE);
4263 } else
4264 idaw_dst = dst;
4265 }
4266 if ((idaw_dst + idaw_len) != dst) {
4267 dasd_sfree_request(cqr, startdev);
4268 return ERR_PTR(-ERANGE);
4269 }
4270 part_len = min(seg_len, len_to_track_end);
4271 seg_len -= part_len;
4272 dst += part_len;
4273 idaw_len += part_len;
4274 len_to_track_end -= part_len;
4275 /* collected memory area ends on an IDA_BLOCK border,
4276 * -> create an idaw
4277 * idal_create_words will handle cases where idaw_len
4278 * is larger then IDA_BLOCK_SIZE
4279 */
4280 if (!((unsigned long)(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
4281 end_idaw = 1;
4282 /* We also need to end the idaw at track end */
4283 if (!len_to_track_end) {
4284 new_track = 1;
4285 end_idaw = 1;
4286 }
4287 if (end_idaw) {
4288 idaws = idal_create_words(idaws, idaw_dst,
4289 idaw_len);
4290 idaw_dst = NULL;
4291 idaw_len = 0;
4292 end_idaw = 0;
4293 }
4294 }
4295 }
4296
4297 if (blk_noretry_request(req) ||
4298 block->base->features & DASD_FEATURE_FAILFAST)
4299 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4300 cqr->startdev = startdev;
4301 cqr->memdev = startdev;
4302 cqr->block = block;
4303 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4304 cqr->lpm = dasd_path_get_ppm(startdev);
4305 cqr->retries = startdev->default_retries;
4306 cqr->buildclk = get_tod_clock();
4307 cqr->status = DASD_CQR_FILLED;
4308
4309 /* Set flags to suppress output for expected errors */
4310 if (dasd_eckd_is_ese(basedev))
4311 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4312
4313 return cqr;
4314 }
4315
prepare_itcw(struct itcw * itcw,unsigned int trk,unsigned int totrk,int cmd,struct dasd_device * basedev,struct dasd_device * startdev,unsigned int rec_on_trk,int count,unsigned int blksize,unsigned int total_data_size,unsigned int tlf,unsigned int blk_per_trk)4316 static int prepare_itcw(struct itcw *itcw,
4317 unsigned int trk, unsigned int totrk, int cmd,
4318 struct dasd_device *basedev,
4319 struct dasd_device *startdev,
4320 unsigned int rec_on_trk, int count,
4321 unsigned int blksize,
4322 unsigned int total_data_size,
4323 unsigned int tlf,
4324 unsigned int blk_per_trk)
4325 {
4326 struct PFX_eckd_data pfxdata;
4327 struct dasd_eckd_private *basepriv, *startpriv;
4328 struct DE_eckd_data *dedata;
4329 struct LRE_eckd_data *lredata;
4330 struct dcw *dcw;
4331
4332 u32 begcyl, endcyl;
4333 u16 heads, beghead, endhead;
4334 u8 pfx_cmd;
4335
4336 int rc = 0;
4337 int sector = 0;
4338 int dn, d;
4339
4340
4341 /* setup prefix data */
4342 basepriv = basedev->private;
4343 startpriv = startdev->private;
4344 dedata = &pfxdata.define_extent;
4345 lredata = &pfxdata.locate_record;
4346
4347 memset(&pfxdata, 0, sizeof(pfxdata));
4348 pfxdata.format = 1; /* PFX with LRE */
4349 pfxdata.base_address = basepriv->conf.ned->unit_addr;
4350 pfxdata.base_lss = basepriv->conf.ned->ID;
4351 pfxdata.validity.define_extent = 1;
4352
4353 /* private uid is kept up to date, conf_data may be outdated */
4354 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
4355 pfxdata.validity.verify_base = 1;
4356
4357 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
4358 pfxdata.validity.verify_base = 1;
4359 pfxdata.validity.hyper_pav = 1;
4360 }
4361
4362 switch (cmd) {
4363 case DASD_ECKD_CCW_READ_TRACK_DATA:
4364 dedata->mask.perm = 0x1;
4365 dedata->attributes.operation = basepriv->attrib.operation;
4366 dedata->blk_size = blksize;
4367 dedata->ga_extended |= 0x42;
4368 lredata->operation.orientation = 0x0;
4369 lredata->operation.operation = 0x0C;
4370 lredata->auxiliary.check_bytes = 0x01;
4371 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4372 break;
4373 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
4374 dedata->mask.perm = 0x02;
4375 dedata->attributes.operation = basepriv->attrib.operation;
4376 dedata->blk_size = blksize;
4377 rc = set_timestamp(NULL, dedata, basedev);
4378 dedata->ga_extended |= 0x42;
4379 lredata->operation.orientation = 0x0;
4380 lredata->operation.operation = 0x3F;
4381 lredata->extended_operation = 0x23;
4382 lredata->auxiliary.check_bytes = 0x2;
4383 /*
4384 * If XRC is supported the System Time Stamp is set. The
4385 * validity of the time stamp must be reflected in the prefix
4386 * data as well.
4387 */
4388 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
4389 pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
4390 pfx_cmd = DASD_ECKD_CCW_PFX;
4391 break;
4392 case DASD_ECKD_CCW_READ_COUNT_MT:
4393 dedata->mask.perm = 0x1;
4394 dedata->attributes.operation = DASD_BYPASS_CACHE;
4395 dedata->ga_extended |= 0x42;
4396 dedata->blk_size = blksize;
4397 lredata->operation.orientation = 0x2;
4398 lredata->operation.operation = 0x16;
4399 lredata->auxiliary.check_bytes = 0x01;
4400 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4401 break;
4402 default:
4403 DBF_DEV_EVENT(DBF_ERR, basedev,
4404 "prepare itcw, unknown opcode 0x%x", cmd);
4405 BUG();
4406 break;
4407 }
4408 if (rc)
4409 return rc;
4410
4411 dedata->attributes.mode = 0x3; /* ECKD */
4412
4413 heads = basepriv->rdc_data.trk_per_cyl;
4414 begcyl = trk / heads;
4415 beghead = trk % heads;
4416 endcyl = totrk / heads;
4417 endhead = totrk % heads;
4418
4419 /* check for sequential prestage - enhance cylinder range */
4420 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
4421 dedata->attributes.operation == DASD_SEQ_ACCESS) {
4422
4423 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
4424 endcyl += basepriv->attrib.nr_cyl;
4425 else
4426 endcyl = (basepriv->real_cyl - 1);
4427 }
4428
4429 set_ch_t(&dedata->beg_ext, begcyl, beghead);
4430 set_ch_t(&dedata->end_ext, endcyl, endhead);
4431
4432 dedata->ep_format = 0x20; /* records per track is valid */
4433 dedata->ep_rec_per_track = blk_per_trk;
4434
4435 if (rec_on_trk) {
4436 switch (basepriv->rdc_data.dev_type) {
4437 case 0x3390:
4438 dn = ceil_quot(blksize + 6, 232);
4439 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
4440 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
4441 break;
4442 case 0x3380:
4443 d = 7 + ceil_quot(blksize + 12, 32);
4444 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
4445 break;
4446 }
4447 }
4448
4449 if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
4450 lredata->auxiliary.length_valid = 0;
4451 lredata->auxiliary.length_scope = 0;
4452 lredata->sector = 0xff;
4453 } else {
4454 lredata->auxiliary.length_valid = 1;
4455 lredata->auxiliary.length_scope = 1;
4456 lredata->sector = sector;
4457 }
4458 lredata->auxiliary.imbedded_ccw_valid = 1;
4459 lredata->length = tlf;
4460 lredata->imbedded_ccw = cmd;
4461 lredata->count = count;
4462 set_ch_t(&lredata->seek_addr, begcyl, beghead);
4463 lredata->search_arg.cyl = lredata->seek_addr.cyl;
4464 lredata->search_arg.head = lredata->seek_addr.head;
4465 lredata->search_arg.record = rec_on_trk;
4466
4467 dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
4468 &pfxdata, sizeof(pfxdata), total_data_size);
4469 return PTR_ERR_OR_ZERO(dcw);
4470 }
4471
dasd_eckd_build_cp_tpm_track(struct dasd_device * startdev,struct dasd_block * block,struct request * req,sector_t first_rec,sector_t last_rec,sector_t first_trk,sector_t last_trk,unsigned int first_offs,unsigned int last_offs,unsigned int blk_per_trk,unsigned int blksize)4472 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
4473 struct dasd_device *startdev,
4474 struct dasd_block *block,
4475 struct request *req,
4476 sector_t first_rec,
4477 sector_t last_rec,
4478 sector_t first_trk,
4479 sector_t last_trk,
4480 unsigned int first_offs,
4481 unsigned int last_offs,
4482 unsigned int blk_per_trk,
4483 unsigned int blksize)
4484 {
4485 struct dasd_ccw_req *cqr;
4486 struct req_iterator iter;
4487 struct bio_vec bv;
4488 char *dst;
4489 unsigned int trkcount, ctidaw;
4490 unsigned char cmd;
4491 struct dasd_device *basedev;
4492 unsigned int tlf;
4493 struct itcw *itcw;
4494 struct tidaw *last_tidaw = NULL;
4495 int itcw_op;
4496 size_t itcw_size;
4497 u8 tidaw_flags;
4498 unsigned int seg_len, part_len, len_to_track_end;
4499 unsigned char new_track;
4500 sector_t recid, trkid;
4501 unsigned int offs;
4502 unsigned int count, count_to_trk_end;
4503 int ret;
4504
4505 basedev = block->base;
4506 if (rq_data_dir(req) == READ) {
4507 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4508 itcw_op = ITCW_OP_READ;
4509 } else if (rq_data_dir(req) == WRITE) {
4510 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4511 itcw_op = ITCW_OP_WRITE;
4512 } else
4513 return ERR_PTR(-EINVAL);
4514
4515 /* trackbased I/O needs address all memory via TIDAWs,
4516 * not just for 64 bit addresses. This allows us to map
4517 * each segment directly to one tidaw.
4518 * In the case of write requests, additional tidaws may
4519 * be needed when a segment crosses a track boundary.
4520 */
4521 trkcount = last_trk - first_trk + 1;
4522 ctidaw = 0;
4523 rq_for_each_segment(bv, req, iter) {
4524 ++ctidaw;
4525 }
4526 if (rq_data_dir(req) == WRITE)
4527 ctidaw += (last_trk - first_trk);
4528
4529 /* Allocate the ccw request. */
4530 itcw_size = itcw_calc_size(0, ctidaw, 0);
4531 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4532 blk_mq_rq_to_pdu(req));
4533 if (IS_ERR(cqr))
4534 return cqr;
4535
4536 /* transfer length factor: how many bytes to read from the last track */
4537 if (first_trk == last_trk)
4538 tlf = last_offs - first_offs + 1;
4539 else
4540 tlf = last_offs + 1;
4541 tlf *= blksize;
4542
4543 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4544 if (IS_ERR(itcw)) {
4545 ret = -EINVAL;
4546 goto out_error;
4547 }
4548 cqr->cpaddr = itcw_get_tcw(itcw);
4549 if (prepare_itcw(itcw, first_trk, last_trk,
4550 cmd, basedev, startdev,
4551 first_offs + 1,
4552 trkcount, blksize,
4553 (last_rec - first_rec + 1) * blksize,
4554 tlf, blk_per_trk) == -EAGAIN) {
4555 /* Clock not in sync and XRC is enabled.
4556 * Try again later.
4557 */
4558 ret = -EAGAIN;
4559 goto out_error;
4560 }
4561 len_to_track_end = 0;
4562 /*
4563 * A tidaw can address 4k of memory, but must not cross page boundaries
4564 * We can let the block layer handle this by setting seg_boundary_mask
4565 * to page boundaries and max_segment_size to page size when setting up
4566 * the request queue.
4567 * For write requests, a TIDAW must not cross track boundaries, because
4568 * we have to set the CBC flag on the last tidaw for each track.
4569 */
4570 if (rq_data_dir(req) == WRITE) {
4571 new_track = 1;
4572 recid = first_rec;
4573 rq_for_each_segment(bv, req, iter) {
4574 dst = bvec_virt(&bv);
4575 seg_len = bv.bv_len;
4576 while (seg_len) {
4577 if (new_track) {
4578 trkid = recid;
4579 offs = sector_div(trkid, blk_per_trk);
4580 count_to_trk_end = blk_per_trk - offs;
4581 count = min((last_rec - recid + 1),
4582 (sector_t)count_to_trk_end);
4583 len_to_track_end = count * blksize;
4584 recid += count;
4585 new_track = 0;
4586 }
4587 part_len = min(seg_len, len_to_track_end);
4588 seg_len -= part_len;
4589 len_to_track_end -= part_len;
4590 /* We need to end the tidaw at track end */
4591 if (!len_to_track_end) {
4592 new_track = 1;
4593 tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4594 } else
4595 tidaw_flags = 0;
4596 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4597 dst, part_len);
4598 if (IS_ERR(last_tidaw)) {
4599 ret = -EINVAL;
4600 goto out_error;
4601 }
4602 dst += part_len;
4603 }
4604 }
4605 } else {
4606 rq_for_each_segment(bv, req, iter) {
4607 dst = bvec_virt(&bv);
4608 last_tidaw = itcw_add_tidaw(itcw, 0x00,
4609 dst, bv.bv_len);
4610 if (IS_ERR(last_tidaw)) {
4611 ret = -EINVAL;
4612 goto out_error;
4613 }
4614 }
4615 }
4616 last_tidaw->flags |= TIDAW_FLAGS_LAST;
4617 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
4618 itcw_finalize(itcw);
4619
4620 if (blk_noretry_request(req) ||
4621 block->base->features & DASD_FEATURE_FAILFAST)
4622 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4623 cqr->cpmode = 1;
4624 cqr->startdev = startdev;
4625 cqr->memdev = startdev;
4626 cqr->block = block;
4627 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4628 cqr->lpm = dasd_path_get_ppm(startdev);
4629 cqr->retries = startdev->default_retries;
4630 cqr->buildclk = get_tod_clock();
4631 cqr->status = DASD_CQR_FILLED;
4632
4633 /* Set flags to suppress output for expected errors */
4634 if (dasd_eckd_is_ese(basedev)) {
4635 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4636 set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
4637 }
4638
4639 return cqr;
4640 out_error:
4641 dasd_sfree_request(cqr, startdev);
4642 return ERR_PTR(ret);
4643 }
4644
dasd_eckd_build_cp(struct dasd_device * startdev,struct dasd_block * block,struct request * req)4645 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4646 struct dasd_block *block,
4647 struct request *req)
4648 {
4649 int cmdrtd, cmdwtd;
4650 int use_prefix;
4651 int fcx_multitrack;
4652 struct dasd_eckd_private *private;
4653 struct dasd_device *basedev;
4654 sector_t first_rec, last_rec;
4655 sector_t first_trk, last_trk;
4656 unsigned int first_offs, last_offs;
4657 unsigned int blk_per_trk, blksize;
4658 int cdlspecial;
4659 unsigned int data_size;
4660 struct dasd_ccw_req *cqr;
4661
4662 basedev = block->base;
4663 private = basedev->private;
4664
4665 /* Calculate number of blocks/records per track. */
4666 blksize = block->bp_block;
4667 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4668 if (blk_per_trk == 0)
4669 return ERR_PTR(-EINVAL);
4670 /* Calculate record id of first and last block. */
4671 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4672 first_offs = sector_div(first_trk, blk_per_trk);
4673 last_rec = last_trk =
4674 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4675 last_offs = sector_div(last_trk, blk_per_trk);
4676 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4677
4678 fcx_multitrack = private->features.feature[40] & 0x20;
4679 data_size = blk_rq_bytes(req);
4680 if (data_size % blksize)
4681 return ERR_PTR(-EINVAL);
4682 /* tpm write request add CBC data on each track boundary */
4683 if (rq_data_dir(req) == WRITE)
4684 data_size += (last_trk - first_trk) * 4;
4685
4686 /* is read track data and write track data in command mode supported? */
4687 cmdrtd = private->features.feature[9] & 0x20;
4688 cmdwtd = private->features.feature[12] & 0x40;
4689 use_prefix = private->features.feature[8] & 0x01;
4690
4691 cqr = NULL;
4692 if (cdlspecial || dasd_page_cache) {
4693 /* do nothing, just fall through to the cmd mode single case */
4694 } else if ((data_size <= private->fcx_max_data)
4695 && (fcx_multitrack || (first_trk == last_trk))) {
4696 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4697 first_rec, last_rec,
4698 first_trk, last_trk,
4699 first_offs, last_offs,
4700 blk_per_trk, blksize);
4701 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4702 (PTR_ERR(cqr) != -ENOMEM))
4703 cqr = NULL;
4704 } else if (use_prefix &&
4705 (((rq_data_dir(req) == READ) && cmdrtd) ||
4706 ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4707 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4708 first_rec, last_rec,
4709 first_trk, last_trk,
4710 first_offs, last_offs,
4711 blk_per_trk, blksize);
4712 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4713 (PTR_ERR(cqr) != -ENOMEM))
4714 cqr = NULL;
4715 }
4716 if (!cqr)
4717 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4718 first_rec, last_rec,
4719 first_trk, last_trk,
4720 first_offs, last_offs,
4721 blk_per_trk, blksize);
4722 return cqr;
4723 }
4724
dasd_eckd_build_cp_raw(struct dasd_device * startdev,struct dasd_block * block,struct request * req)4725 static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
4726 struct dasd_block *block,
4727 struct request *req)
4728 {
4729 sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
4730 unsigned int seg_len, len_to_track_end;
4731 unsigned int cidaw, cplength, datasize;
4732 sector_t first_trk, last_trk, sectors;
4733 struct dasd_eckd_private *base_priv;
4734 struct dasd_device *basedev;
4735 struct req_iterator iter;
4736 struct dasd_ccw_req *cqr;
4737 unsigned int trkcount;
4738 unsigned int size;
4739 unsigned char cmd;
4740 struct bio_vec bv;
4741 struct ccw1 *ccw;
4742 dma64_t *idaws;
4743 int use_prefix;
4744 void *data;
4745 char *dst;
4746
4747 /*
4748 * raw track access needs to be mutiple of 64k and on 64k boundary
4749 * For read requests we can fix an incorrect alignment by padding
4750 * the request with dummy pages.
4751 */
4752 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
4753 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
4754 DASD_RAW_SECTORS_PER_TRACK;
4755 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
4756 DASD_RAW_SECTORS_PER_TRACK;
4757 basedev = block->base;
4758 if ((start_padding_sectors || end_padding_sectors) &&
4759 (rq_data_dir(req) == WRITE)) {
4760 DBF_DEV_EVENT(DBF_ERR, basedev,
4761 "raw write not track aligned (%llu,%llu) req %p",
4762 start_padding_sectors, end_padding_sectors, req);
4763 return ERR_PTR(-EINVAL);
4764 }
4765
4766 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
4767 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
4768 DASD_RAW_SECTORS_PER_TRACK;
4769 trkcount = last_trk - first_trk + 1;
4770
4771 if (rq_data_dir(req) == READ)
4772 cmd = DASD_ECKD_CCW_READ_TRACK;
4773 else if (rq_data_dir(req) == WRITE)
4774 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
4775 else
4776 return ERR_PTR(-EINVAL);
4777
4778 /*
4779 * Raw track based I/O needs IDAWs for each page,
4780 * and not just for 64 bit addresses.
4781 */
4782 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
4783
4784 /*
4785 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
4786 * of extended parameter. This is needed for write full track.
4787 */
4788 base_priv = basedev->private;
4789 use_prefix = base_priv->features.feature[8] & 0x01;
4790 if (use_prefix) {
4791 cplength = 1 + trkcount;
4792 size = sizeof(struct PFX_eckd_data) + 2;
4793 } else {
4794 cplength = 2 + trkcount;
4795 size = sizeof(struct DE_eckd_data) +
4796 sizeof(struct LRE_eckd_data) + 2;
4797 }
4798 size = ALIGN(size, 8);
4799
4800 datasize = size + cidaw * sizeof(unsigned long);
4801
4802 /* Allocate the ccw request. */
4803 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
4804 datasize, startdev, blk_mq_rq_to_pdu(req));
4805 if (IS_ERR(cqr))
4806 return cqr;
4807
4808 ccw = cqr->cpaddr;
4809 data = cqr->data;
4810
4811 if (use_prefix) {
4812 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
4813 startdev, 1, 0, trkcount, 0, 0);
4814 } else {
4815 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
4816 ccw[-1].flags |= CCW_FLAG_CC;
4817
4818 data += sizeof(struct DE_eckd_data);
4819 locate_record_ext(ccw++, data, first_trk, 0,
4820 trkcount, cmd, basedev, 0, 0);
4821 }
4822
4823 idaws = (dma64_t *)(cqr->data + size);
4824 len_to_track_end = 0;
4825 if (start_padding_sectors) {
4826 ccw[-1].flags |= CCW_FLAG_CC;
4827 ccw->cmd_code = cmd;
4828 /* maximum 3390 track size */
4829 ccw->count = 57326;
4830 /* 64k map to one track */
4831 len_to_track_end = 65536 - start_padding_sectors * 512;
4832 ccw->cda = virt_to_dma32(idaws);
4833 ccw->flags |= CCW_FLAG_IDA;
4834 ccw->flags |= CCW_FLAG_SLI;
4835 ccw++;
4836 for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
4837 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4838 }
4839 rq_for_each_segment(bv, req, iter) {
4840 dst = bvec_virt(&bv);
4841 seg_len = bv.bv_len;
4842 if (cmd == DASD_ECKD_CCW_READ_TRACK)
4843 memset(dst, 0, seg_len);
4844 if (!len_to_track_end) {
4845 ccw[-1].flags |= CCW_FLAG_CC;
4846 ccw->cmd_code = cmd;
4847 /* maximum 3390 track size */
4848 ccw->count = 57326;
4849 /* 64k map to one track */
4850 len_to_track_end = 65536;
4851 ccw->cda = virt_to_dma32(idaws);
4852 ccw->flags |= CCW_FLAG_IDA;
4853 ccw->flags |= CCW_FLAG_SLI;
4854 ccw++;
4855 }
4856 len_to_track_end -= seg_len;
4857 idaws = idal_create_words(idaws, dst, seg_len);
4858 }
4859 for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
4860 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4861 if (blk_noretry_request(req) ||
4862 block->base->features & DASD_FEATURE_FAILFAST)
4863 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4864 cqr->startdev = startdev;
4865 cqr->memdev = startdev;
4866 cqr->block = block;
4867 cqr->expires = startdev->default_expires * HZ;
4868 cqr->lpm = dasd_path_get_ppm(startdev);
4869 cqr->retries = startdev->default_retries;
4870 cqr->buildclk = get_tod_clock();
4871 cqr->status = DASD_CQR_FILLED;
4872
4873 return cqr;
4874 }
4875
4876
4877 static int
dasd_eckd_free_cp(struct dasd_ccw_req * cqr,struct request * req)4878 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4879 {
4880 struct dasd_eckd_private *private;
4881 struct ccw1 *ccw;
4882 struct req_iterator iter;
4883 struct bio_vec bv;
4884 char *dst, *cda;
4885 unsigned int blksize, blk_per_trk, off;
4886 sector_t recid;
4887 int status;
4888
4889 if (!dasd_page_cache)
4890 goto out;
4891 private = cqr->block->base->private;
4892 blksize = cqr->block->bp_block;
4893 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4894 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4895 ccw = cqr->cpaddr;
4896 /* Skip over define extent & locate record. */
4897 ccw++;
4898 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
4899 ccw++;
4900 rq_for_each_segment(bv, req, iter) {
4901 dst = bvec_virt(&bv);
4902 for (off = 0; off < bv.bv_len; off += blksize) {
4903 /* Skip locate record. */
4904 if (private->uses_cdl && recid <= 2*blk_per_trk)
4905 ccw++;
4906 if (dst) {
4907 if (ccw->flags & CCW_FLAG_IDA)
4908 cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda)));
4909 else
4910 cda = dma32_to_virt(ccw->cda);
4911 if (dst != cda) {
4912 if (rq_data_dir(req) == READ)
4913 memcpy(dst, cda, bv.bv_len);
4914 kmem_cache_free(dasd_page_cache,
4915 (void *)((addr_t)cda & PAGE_MASK));
4916 }
4917 dst = NULL;
4918 }
4919 ccw++;
4920 recid++;
4921 }
4922 }
4923 out:
4924 status = cqr->status == DASD_CQR_DONE;
4925 dasd_sfree_request(cqr, cqr->memdev);
4926 return status;
4927 }
4928
4929 /*
4930 * Modify ccw/tcw in cqr so it can be started on a base device.
4931 *
4932 * Note that this is not enough to restart the cqr!
4933 * Either reset cqr->startdev as well (summary unit check handling)
4934 * or restart via separate cqr (as in ERP handling).
4935 */
dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req * cqr)4936 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4937 {
4938 struct ccw1 *ccw;
4939 struct PFX_eckd_data *pfxdata;
4940 struct tcw *tcw;
4941 struct tccb *tccb;
4942 struct dcw *dcw;
4943
4944 if (cqr->cpmode == 1) {
4945 tcw = cqr->cpaddr;
4946 tccb = tcw_get_tccb(tcw);
4947 dcw = (struct dcw *)&tccb->tca[0];
4948 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4949 pfxdata->validity.verify_base = 0;
4950 pfxdata->validity.hyper_pav = 0;
4951 } else {
4952 ccw = cqr->cpaddr;
4953 pfxdata = cqr->data;
4954 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4955 pfxdata->validity.verify_base = 0;
4956 pfxdata->validity.hyper_pav = 0;
4957 }
4958 }
4959 }
4960
4961 #define DASD_ECKD_CHANQ_MAX_SIZE 4
4962
dasd_eckd_build_alias_cp(struct dasd_device * base,struct dasd_block * block,struct request * req)4963 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4964 struct dasd_block *block,
4965 struct request *req)
4966 {
4967 struct dasd_eckd_private *private;
4968 struct dasd_device *startdev;
4969 unsigned long flags;
4970 struct dasd_ccw_req *cqr;
4971
4972 startdev = dasd_alias_get_start_dev(base);
4973 if (!startdev)
4974 startdev = base;
4975 private = startdev->private;
4976 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4977 return ERR_PTR(-EBUSY);
4978
4979 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4980 private->count++;
4981 if ((base->features & DASD_FEATURE_USERAW))
4982 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4983 else
4984 cqr = dasd_eckd_build_cp(startdev, block, req);
4985 if (IS_ERR(cqr))
4986 private->count--;
4987 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4988 return cqr;
4989 }
4990
dasd_eckd_free_alias_cp(struct dasd_ccw_req * cqr,struct request * req)4991 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4992 struct request *req)
4993 {
4994 struct dasd_eckd_private *private;
4995 unsigned long flags;
4996
4997 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4998 private = cqr->memdev->private;
4999 private->count--;
5000 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
5001 return dasd_eckd_free_cp(cqr, req);
5002 }
5003
5004 static int
dasd_eckd_fill_info(struct dasd_device * device,struct dasd_information2_t * info)5005 dasd_eckd_fill_info(struct dasd_device * device,
5006 struct dasd_information2_t * info)
5007 {
5008 struct dasd_eckd_private *private = device->private;
5009
5010 info->label_block = 2;
5011 info->FBA_layout = private->uses_cdl ? 0 : 1;
5012 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
5013 info->characteristics_size = sizeof(private->rdc_data);
5014 memcpy(info->characteristics, &private->rdc_data,
5015 sizeof(private->rdc_data));
5016 info->confdata_size = min_t(unsigned long, private->conf.len,
5017 sizeof(info->configuration_data));
5018 memcpy(info->configuration_data, private->conf.data,
5019 info->confdata_size);
5020 return 0;
5021 }
5022
5023 /*
5024 * SECTION: ioctl functions for eckd devices.
5025 */
5026
5027 /*
5028 * Release device ioctl.
5029 * Buils a channel programm to releases a prior reserved
5030 * (see dasd_eckd_reserve) device.
5031 */
5032 static int
dasd_eckd_release(struct dasd_device * device)5033 dasd_eckd_release(struct dasd_device *device)
5034 {
5035 struct dasd_ccw_req *cqr;
5036 int rc;
5037 struct ccw1 *ccw;
5038 int useglobal;
5039
5040 if (!capable(CAP_SYS_ADMIN))
5041 return -EACCES;
5042
5043 useglobal = 0;
5044 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5045 if (IS_ERR(cqr)) {
5046 mutex_lock(&dasd_reserve_mutex);
5047 useglobal = 1;
5048 cqr = &dasd_reserve_req->cqr;
5049 memset(cqr, 0, sizeof(*cqr));
5050 memset(&dasd_reserve_req->ccw, 0,
5051 sizeof(dasd_reserve_req->ccw));
5052 cqr->cpaddr = &dasd_reserve_req->ccw;
5053 cqr->data = &dasd_reserve_req->data;
5054 cqr->magic = DASD_ECKD_MAGIC;
5055 }
5056 ccw = cqr->cpaddr;
5057 ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
5058 ccw->flags |= CCW_FLAG_SLI;
5059 ccw->count = 32;
5060 ccw->cda = virt_to_dma32(cqr->data);
5061 cqr->startdev = device;
5062 cqr->memdev = device;
5063 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5064 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5065 cqr->retries = 2; /* set retry counter to enable basic ERP */
5066 cqr->expires = 2 * HZ;
5067 cqr->buildclk = get_tod_clock();
5068 cqr->status = DASD_CQR_FILLED;
5069
5070 rc = dasd_sleep_on_immediatly(cqr);
5071 if (!rc)
5072 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5073
5074 if (useglobal)
5075 mutex_unlock(&dasd_reserve_mutex);
5076 else
5077 dasd_sfree_request(cqr, cqr->memdev);
5078 return rc;
5079 }
5080
5081 /*
5082 * Reserve device ioctl.
5083 * Options are set to 'synchronous wait for interrupt' and
5084 * 'timeout the request'. This leads to a terminate IO if
5085 * the interrupt is outstanding for a certain time.
5086 */
5087 static int
dasd_eckd_reserve(struct dasd_device * device)5088 dasd_eckd_reserve(struct dasd_device *device)
5089 {
5090 struct dasd_ccw_req *cqr;
5091 int rc;
5092 struct ccw1 *ccw;
5093 int useglobal;
5094
5095 if (!capable(CAP_SYS_ADMIN))
5096 return -EACCES;
5097
5098 useglobal = 0;
5099 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5100 if (IS_ERR(cqr)) {
5101 mutex_lock(&dasd_reserve_mutex);
5102 useglobal = 1;
5103 cqr = &dasd_reserve_req->cqr;
5104 memset(cqr, 0, sizeof(*cqr));
5105 memset(&dasd_reserve_req->ccw, 0,
5106 sizeof(dasd_reserve_req->ccw));
5107 cqr->cpaddr = &dasd_reserve_req->ccw;
5108 cqr->data = &dasd_reserve_req->data;
5109 cqr->magic = DASD_ECKD_MAGIC;
5110 }
5111 ccw = cqr->cpaddr;
5112 ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
5113 ccw->flags |= CCW_FLAG_SLI;
5114 ccw->count = 32;
5115 ccw->cda = virt_to_dma32(cqr->data);
5116 cqr->startdev = device;
5117 cqr->memdev = device;
5118 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5119 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5120 cqr->retries = 2; /* set retry counter to enable basic ERP */
5121 cqr->expires = 2 * HZ;
5122 cqr->buildclk = get_tod_clock();
5123 cqr->status = DASD_CQR_FILLED;
5124
5125 rc = dasd_sleep_on_immediatly(cqr);
5126 if (!rc)
5127 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5128
5129 if (useglobal)
5130 mutex_unlock(&dasd_reserve_mutex);
5131 else
5132 dasd_sfree_request(cqr, cqr->memdev);
5133 return rc;
5134 }
5135
5136 /*
5137 * Steal lock ioctl - unconditional reserve device.
5138 * Buils a channel programm to break a device's reservation.
5139 * (unconditional reserve)
5140 */
5141 static int
dasd_eckd_steal_lock(struct dasd_device * device)5142 dasd_eckd_steal_lock(struct dasd_device *device)
5143 {
5144 struct dasd_ccw_req *cqr;
5145 int rc;
5146 struct ccw1 *ccw;
5147 int useglobal;
5148
5149 if (!capable(CAP_SYS_ADMIN))
5150 return -EACCES;
5151
5152 useglobal = 0;
5153 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5154 if (IS_ERR(cqr)) {
5155 mutex_lock(&dasd_reserve_mutex);
5156 useglobal = 1;
5157 cqr = &dasd_reserve_req->cqr;
5158 memset(cqr, 0, sizeof(*cqr));
5159 memset(&dasd_reserve_req->ccw, 0,
5160 sizeof(dasd_reserve_req->ccw));
5161 cqr->cpaddr = &dasd_reserve_req->ccw;
5162 cqr->data = &dasd_reserve_req->data;
5163 cqr->magic = DASD_ECKD_MAGIC;
5164 }
5165 ccw = cqr->cpaddr;
5166 ccw->cmd_code = DASD_ECKD_CCW_SLCK;
5167 ccw->flags |= CCW_FLAG_SLI;
5168 ccw->count = 32;
5169 ccw->cda = virt_to_dma32(cqr->data);
5170 cqr->startdev = device;
5171 cqr->memdev = device;
5172 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5173 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5174 cqr->retries = 2; /* set retry counter to enable basic ERP */
5175 cqr->expires = 2 * HZ;
5176 cqr->buildclk = get_tod_clock();
5177 cqr->status = DASD_CQR_FILLED;
5178
5179 rc = dasd_sleep_on_immediatly(cqr);
5180 if (!rc)
5181 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5182
5183 if (useglobal)
5184 mutex_unlock(&dasd_reserve_mutex);
5185 else
5186 dasd_sfree_request(cqr, cqr->memdev);
5187 return rc;
5188 }
5189
5190 /*
5191 * SNID - Sense Path Group ID
5192 * This ioctl may be used in situations where I/O is stalled due to
5193 * a reserve, so if the normal dasd_smalloc_request fails, we use the
5194 * preallocated dasd_reserve_req.
5195 */
dasd_eckd_snid(struct dasd_device * device,void __user * argp)5196 static int dasd_eckd_snid(struct dasd_device *device,
5197 void __user *argp)
5198 {
5199 struct dasd_ccw_req *cqr;
5200 int rc;
5201 struct ccw1 *ccw;
5202 int useglobal;
5203 struct dasd_snid_ioctl_data usrparm;
5204
5205 if (!capable(CAP_SYS_ADMIN))
5206 return -EACCES;
5207
5208 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5209 return -EFAULT;
5210
5211 useglobal = 0;
5212 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
5213 sizeof(struct dasd_snid_data), device,
5214 NULL);
5215 if (IS_ERR(cqr)) {
5216 mutex_lock(&dasd_reserve_mutex);
5217 useglobal = 1;
5218 cqr = &dasd_reserve_req->cqr;
5219 memset(cqr, 0, sizeof(*cqr));
5220 memset(&dasd_reserve_req->ccw, 0,
5221 sizeof(dasd_reserve_req->ccw));
5222 cqr->cpaddr = &dasd_reserve_req->ccw;
5223 cqr->data = &dasd_reserve_req->data;
5224 cqr->magic = DASD_ECKD_MAGIC;
5225 }
5226 ccw = cqr->cpaddr;
5227 ccw->cmd_code = DASD_ECKD_CCW_SNID;
5228 ccw->flags |= CCW_FLAG_SLI;
5229 ccw->count = 12;
5230 ccw->cda = virt_to_dma32(cqr->data);
5231 cqr->startdev = device;
5232 cqr->memdev = device;
5233 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5234 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5235 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
5236 cqr->retries = 5;
5237 cqr->expires = 10 * HZ;
5238 cqr->buildclk = get_tod_clock();
5239 cqr->status = DASD_CQR_FILLED;
5240 cqr->lpm = usrparm.path_mask;
5241
5242 rc = dasd_sleep_on_immediatly(cqr);
5243 /* verify that I/O processing didn't modify the path mask */
5244 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
5245 rc = -EIO;
5246 if (!rc) {
5247 usrparm.data = *((struct dasd_snid_data *)cqr->data);
5248 if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
5249 rc = -EFAULT;
5250 }
5251
5252 if (useglobal)
5253 mutex_unlock(&dasd_reserve_mutex);
5254 else
5255 dasd_sfree_request(cqr, cqr->memdev);
5256 return rc;
5257 }
5258
5259 /*
5260 * Read performance statistics
5261 */
5262 static int
dasd_eckd_performance(struct dasd_device * device,void __user * argp)5263 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
5264 {
5265 struct dasd_psf_prssd_data *prssdp;
5266 struct dasd_rssd_perf_stats_t *stats;
5267 struct dasd_ccw_req *cqr;
5268 struct ccw1 *ccw;
5269 int rc;
5270
5271 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5272 (sizeof(struct dasd_psf_prssd_data) +
5273 sizeof(struct dasd_rssd_perf_stats_t)),
5274 device, NULL);
5275 if (IS_ERR(cqr)) {
5276 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5277 "Could not allocate initialization request");
5278 return PTR_ERR(cqr);
5279 }
5280 cqr->startdev = device;
5281 cqr->memdev = device;
5282 cqr->retries = 0;
5283 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5284 cqr->expires = 10 * HZ;
5285
5286 /* Prepare for Read Subsystem Data */
5287 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5288 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5289 prssdp->order = PSF_ORDER_PRSSD;
5290 prssdp->suborder = 0x01; /* Performance Statistics */
5291 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
5292
5293 ccw = cqr->cpaddr;
5294 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5295 ccw->count = sizeof(struct dasd_psf_prssd_data);
5296 ccw->flags |= CCW_FLAG_CC;
5297 ccw->cda = virt_to_dma32(prssdp);
5298
5299 /* Read Subsystem Data - Performance Statistics */
5300 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5301 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
5302
5303 ccw++;
5304 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5305 ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
5306 ccw->cda = virt_to_dma32(stats);
5307
5308 cqr->buildclk = get_tod_clock();
5309 cqr->status = DASD_CQR_FILLED;
5310 rc = dasd_sleep_on(cqr);
5311 if (rc == 0) {
5312 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5313 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5314 if (copy_to_user(argp, stats,
5315 sizeof(struct dasd_rssd_perf_stats_t)))
5316 rc = -EFAULT;
5317 }
5318 dasd_sfree_request(cqr, cqr->memdev);
5319 return rc;
5320 }
5321
5322 /*
5323 * Get attributes (cache operations)
5324 * Returnes the cache attributes used in Define Extend (DE).
5325 */
5326 static int
5327 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5328 {
5329 struct dasd_eckd_private *private = device->private;
5330 struct attrib_data_t attrib = private->attrib;
5331 int rc;
5332
5333 if (!capable(CAP_SYS_ADMIN))
5334 return -EACCES;
5335 if (!argp)
5336 return -EINVAL;
5337
5338 rc = 0;
5339 if (copy_to_user(argp, (long *) &attrib,
5340 sizeof(struct attrib_data_t)))
5341 rc = -EFAULT;
5342
5343 return rc;
5344 }
5345
5346 /*
5347 * Set attributes (cache operations)
5348 * Stores the attributes for cache operation to be used in Define Extent (DE).
5349 */
5350 static int
5351 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5352 {
5353 struct dasd_eckd_private *private = device->private;
5354 struct attrib_data_t attrib;
5355
5356 if (!capable(CAP_SYS_ADMIN))
5357 return -EACCES;
5358 if (!argp)
5359 return -EINVAL;
5360
5361 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5362 return -EFAULT;
5363 private->attrib = attrib;
5364
5365 dev_info(&device->cdev->dev,
5366 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5367 private->attrib.operation, private->attrib.nr_cyl);
5368 return 0;
5369 }
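/*
 * Illustrative only: a user-space round trip over the two attribute ioctls
 * above.  The fields 'operation' and 'nr_cyl' are taken from the dev_info()
 * message in dasd_eckd_set_attrib(); the value assigned to 'operation'
 * below is a made-up placeholder, see <asm/dasd.h> for the defined cache
 * operation modes.
 *
 *	struct attrib_data_t attrib;
 *
 *	if (ioctl(fd, BIODASDGATTR, &attrib) == 0) {
 *		attrib.operation = 0x3;	// hypothetical cache mode
 *		attrib.nr_cyl = 2;	// prestage two cylinders
 *		ioctl(fd, BIODASDSATTR, &attrib);
 *	}
 */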
5370
5371 /*
5372 * Issue syscall I/O to EMC Symmetrix array.
5373 * CCWs are PSF and RSSD
5374 */
5375 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
5376 {
5377 struct dasd_symmio_parms usrparm;
5378 char *psf_data, *rssd_result;
5379 struct dasd_ccw_req *cqr;
5380 struct ccw1 *ccw;
5381 char psf0, psf1;
5382 int rc;
5383
5384 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
5385 return -EACCES;
5386 psf0 = psf1 = 0;
5387
5388 /* Copy parms from caller */
5389 rc = -EFAULT;
5390 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5391 goto out;
5392 if (is_compat_task()) {
5393 /* Make sure pointers are sane even on 31 bit. */
5394 rc = -EINVAL;
5395 if ((usrparm.psf_data >> 32) != 0)
5396 goto out;
5397 if ((usrparm.rssd_result >> 32) != 0)
5398 goto out;
5399 usrparm.psf_data &= 0x7fffffffULL;
5400 usrparm.rssd_result &= 0x7fffffffULL;
5401 }
5402 /* at least 2 bytes are accessed and should be allocated */
5403 if (usrparm.psf_data_len < 2) {
5404 DBF_DEV_EVENT(DBF_WARNING, device,
5405 "Symmetrix ioctl invalid data length %d",
5406 usrparm.psf_data_len);
5407 rc = -EINVAL;
5408 goto out;
5409 }
5410 /* alloc I/O data area */
5411 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
5412 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
5413 if (!psf_data || !rssd_result) {
5414 rc = -ENOMEM;
5415 goto out_free;
5416 }
5417
5418 /* get syscall header from user space */
5419 rc = -EFAULT;
5420 if (copy_from_user(psf_data,
5421 (void __user *)(unsigned long) usrparm.psf_data,
5422 usrparm.psf_data_len))
5423 goto out_free;
5424 psf0 = psf_data[0];
5425 psf1 = psf_data[1];
5426
5427 /* setup CCWs for PSF + RSSD */
5428 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
5429 if (IS_ERR(cqr)) {
5430 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5431 "Could not allocate initialization request");
5432 rc = PTR_ERR(cqr);
5433 goto out_free;
5434 }
5435
5436 cqr->startdev = device;
5437 cqr->memdev = device;
5438 cqr->retries = 3;
5439 cqr->expires = 10 * HZ;
5440 cqr->buildclk = get_tod_clock();
5441 cqr->status = DASD_CQR_FILLED;
5442
5443 /* Build the ccws */
5444 ccw = cqr->cpaddr;
5445
5446 /* PSF ccw */
5447 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5448 ccw->count = usrparm.psf_data_len;
5449 ccw->flags |= CCW_FLAG_CC;
5450 ccw->cda = virt_to_dma32(psf_data);
5451
5452 ccw++;
5453
5454 /* RSSD ccw */
5455 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5456 ccw->count = usrparm.rssd_result_len;
5457 ccw->flags = CCW_FLAG_SLI;
5458 ccw->cda = virt_to_dma32(rssd_result);
5459
5460 rc = dasd_sleep_on(cqr);
5461 if (rc)
5462 goto out_sfree;
5463
5464 rc = -EFAULT;
5465 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
5466 rssd_result, usrparm.rssd_result_len))
5467 goto out_sfree;
5468 rc = 0;
5469
5470 out_sfree:
5471 dasd_sfree_request(cqr, cqr->memdev);
5472 out_free:
5473 kfree(rssd_result);
5474 kfree(psf_data);
5475 out:
5476 DBF_DEV_EVENT(DBF_WARNING, device,
5477 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
5478 (int) psf0, (int) psf1, rc);
5479 return rc;
5480 }
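/*
 * Illustrative only: how a caller would fill struct dasd_symmio_parms for
 * the BIODASDSYMMIO path above.  The field names mirror the usrparm
 * handling in dasd_symm_io(); the PSF payload itself is device specific
 * and the bytes below are placeholders, not a real PSF order.
 *
 *	struct dasd_symmio_parms parms = { 0 };
 *	unsigned char psf[16] = { 0x00, 0x00 };	// hypothetical PSF data
 *	unsigned char result[256];
 *
 *	parms.psf_data = (unsigned long)psf;
 *	parms.psf_data_len = sizeof(psf);
 *	parms.rssd_result = (unsigned long)result;
 *	parms.rssd_result_len = sizeof(result);
 *	ioctl(fd, BIODASDSYMMIO, &parms);
 */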
5481
5482 static int
5483 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5484 {
5485 struct dasd_device *device = block->base;
5486
5487 switch (cmd) {
5488 case BIODASDGATTR:
5489 return dasd_eckd_get_attrib(device, argp);
5490 case BIODASDSATTR:
5491 return dasd_eckd_set_attrib(device, argp);
5492 case BIODASDPSRD:
5493 return dasd_eckd_performance(device, argp);
5494 case BIODASDRLSE:
5495 return dasd_eckd_release(device);
5496 case BIODASDRSRV:
5497 return dasd_eckd_reserve(device);
5498 case BIODASDSLCK:
5499 return dasd_eckd_steal_lock(device);
5500 case BIODASDSNID:
5501 return dasd_eckd_snid(device, argp);
5502 case BIODASDSYMMIO:
5503 return dasd_symm_io(device, argp);
5504 default:
5505 return -ENOTTY;
5506 }
5507 }
5508
5509 /*
5510 * Dump the range of CCWs into the 'page' buffer
5511 * and print it to the device log.
5512 */
5513 static void
5514 dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
5515 struct ccw1 *to, char *page)
5516 {
5517 int len, count;
5518 char *datap;
5519
5520 len = 0;
5521 while (from <= to) {
5522 len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
5523 from, ((int *) from)[0], ((int *) from)[1]);
5524
5525 /* get pointer to data (consider IDALs) */
5526 if (from->flags & CCW_FLAG_IDA)
5527 datap = dma64_to_virt(*((dma64_t *)dma32_to_virt(from->cda)));
5528 else
5529 datap = dma32_to_virt(from->cda);
5530
5531 /* dump data (max 128 bytes) */
5532 for (count = 0; count < from->count && count < 128; count++) {
5533 if (count % 32 == 0)
5534 len += sprintf(page + len, "\n");
5535 if (count % 8 == 0)
5536 len += sprintf(page + len, " ");
5537 if (count % 4 == 0)
5538 len += sprintf(page + len, " ");
5539 len += sprintf(page + len, "%02x", datap[count]);
5540 }
5541 len += sprintf(page + len, "\n");
5542 from++;
5543 }
5544 if (len > 0)
5545 dev_err(&device->cdev->dev, "%s", page);
5546 }
5547
5548 static void
5549 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
5550 char *reason)
5551 {
5552 u64 *sense;
5553 u64 *stat;
5554
5555 sense = (u64 *) dasd_get_sense(irb);
5556 stat = (u64 *) &irb->scsw;
5557 if (sense) {
5558 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
5559 "%016llx %016llx %016llx %016llx",
5560 reason, *stat, *((u32 *) (stat + 1)),
5561 sense[0], sense[1], sense[2], sense[3]);
5562 } else {
5563 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
5564 reason, *stat, *((u32 *) (stat + 1)),
5565 "NO VALID SENSE");
5566 }
5567 }
5568
5569 /*
5570 * Print sense data and related channel program.
5571 * It is printed in parts because the printk buffer is only 1024 bytes.
5572 */
5573 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
5574 struct dasd_ccw_req *req, struct irb *irb)
5575 {
5576 struct ccw1 *first, *last, *fail, *from, *to;
5577 struct device *dev;
5578 int len, sl, sct;
5579 char *page;
5580
5581 dev = &device->cdev->dev;
5582
5583 page = (char *) get_zeroed_page(GFP_ATOMIC);
5584 if (page == NULL) {
5585 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5586 "No memory to dump sense data\n");
5587 return;
5588 }
5589 /* dump the sense data */
5590 len = sprintf(page, "I/O status report:\n");
5591 len += sprintf(page + len,
5592 "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n",
5593 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5594 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5595 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5596 req ? req->intrc : 0);
5597 len += sprintf(page + len, "Failing CCW: %px\n",
5598 dma32_to_virt(irb->scsw.cmd.cpa));
5599 if (irb->esw.esw0.erw.cons) {
5600 for (sl = 0; sl < 4; sl++) {
5601 len += sprintf(page + len, "Sense(hex) %2d-%2d:",
5602 (8 * sl), ((8 * sl) + 7));
5603
5604 for (sct = 0; sct < 8; sct++) {
5605 len += sprintf(page + len, " %02x",
5606 irb->ecw[8 * sl + sct]);
5607 }
5608 len += sprintf(page + len, "\n");
5609 }
5610
5611 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
5612 /* 24 Byte Sense Data */
5613 sprintf(page + len,
5614 "24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
5615 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
5616 irb->ecw[1] & 0x10 ? "" : "no");
5617 } else {
5618 /* 32 Byte Sense Data */
5619 sprintf(page + len,
5620 "32 Byte: Format: %x Exception class %x\n",
5621 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
5622 }
5623 } else {
5624 sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
5625 }
5626 dev_err(dev, "%s", page);
5627
5628 if (req) {
5629 /* req == NULL for unsolicited interrupts */
5630 /* dump the Channel Program (max 140 Bytes per line) */
5631 /* Count CCW and print first CCWs (maximum 7) */
5632 first = req->cpaddr;
5633 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
5634 to = min(first + 6, last);
5635 dev_err(dev, "Related CP in req: %px\n", req);
5636 dasd_eckd_dump_ccw_range(device, first, to, page);
5637
5638 /* print failing CCW area (maximum 4) */
5639 /* scsw->cda is either valid or zero */
5640 from = ++to;
5641 fail = dma32_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
5642 if (from < fail - 2) {
5643 from = fail - 2; /* there is a gap - print header */
5644 dev_err(dev, "......\n");
5645 }
5646 to = min(fail + 1, last);
5647 dasd_eckd_dump_ccw_range(device, from, to, page + len);
5648
5649 /* print last CCWs (maximum 2) */
5650 len = 0;
5651 from = max(from, ++to);
5652 if (from < last - 1) {
5653 from = last - 1; /* there is a gap - print header */
5654 dev_err(dev, "......\n");
5655 }
5656 dasd_eckd_dump_ccw_range(device, from, last, page + len);
5657 }
5658 free_page((unsigned long) page);
5659 }
5660
5661
5662 /*
5663 * Print sense data from a tcw.
5664 */
5665 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
5666 struct dasd_ccw_req *req, struct irb *irb)
5667 {
5668 char *page;
5669 int len, sl, sct, residual;
5670 struct tsb *tsb;
5671 u8 *sense, *rcq;
5672
5673 page = (char *) get_zeroed_page(GFP_ATOMIC);
5674 if (page == NULL) {
5675 DBF_DEV_EVENT(DBF_WARNING, device, " %s",
5676 "No memory to dump sense data");
5677 return;
5678 }
5679 /* dump the sense data */
5680 len = sprintf(page, "I/O status report:\n");
5681 len += sprintf(page + len,
5682 "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5683 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
5684 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5685 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5686 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5687 irb->scsw.tm.fcxs,
5688 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
5689 req ? req->intrc : 0);
5690 len += sprintf(page + len, "Failing TCW: %px\n",
5691 dma32_to_virt(irb->scsw.tm.tcw));
5692
5693 tsb = NULL;
5694 sense = NULL;
5695 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
5696 tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
5697
5698 if (tsb) {
5699 len += sprintf(page + len, "tsb->length %d\n", tsb->length);
5700 len += sprintf(page + len, "tsb->flags %x\n", tsb->flags);
5701 len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset);
5702 len += sprintf(page + len, "tsb->count %d\n", tsb->count);
5703 residual = tsb->count - 28;
5704 len += sprintf(page + len, "residual %d\n", residual);
5705
5706 switch (tsb->flags & 0x07) {
5707 case 1: /* tsa_iostat */
5708 len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n",
5709 tsb->tsa.iostat.dev_time);
5710 len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n",
5711 tsb->tsa.iostat.def_time);
5712 len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n",
5713 tsb->tsa.iostat.queue_time);
5714 len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n",
5715 tsb->tsa.iostat.dev_busy_time);
5716 len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n",
5717 tsb->tsa.iostat.dev_act_time);
5718 sense = tsb->tsa.iostat.sense;
5719 break;
5720 case 2: /* ts_ddpc */
5721 len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n",
5722 tsb->tsa.ddpc.rc);
5723 for (sl = 0; sl < 2; sl++) {
5724 len += sprintf(page + len,
5725 "tsb->tsa.ddpc.rcq %2d-%2d: ",
5726 (8 * sl), ((8 * sl) + 7));
5727 rcq = tsb->tsa.ddpc.rcq;
5728 for (sct = 0; sct < 8; sct++) {
5729 len += sprintf(page + len, "%02x",
5730 rcq[8 * sl + sct]);
5731 }
5732 len += sprintf(page + len, "\n");
5733 }
5734 sense = tsb->tsa.ddpc.sense;
5735 break;
5736 case 3: /* tsa_intrg */
5737 len += sprintf(page + len,
5738 "tsb->tsa.intrg.: not supported yet\n");
5739 break;
5740 }
5741
5742 if (sense) {
5743 for (sl = 0; sl < 4; sl++) {
5744 len += sprintf(page + len,
5745 "Sense(hex) %2d-%2d:",
5746 (8 * sl), ((8 * sl) + 7));
5747 for (sct = 0; sct < 8; sct++) {
5748 len += sprintf(page + len, " %02x",
5749 sense[8 * sl + sct]);
5750 }
5751 len += sprintf(page + len, "\n");
5752 }
5753
5754 if (sense[27] & DASD_SENSE_BIT_0) {
5755 /* 24 Byte Sense Data */
5756 sprintf(page + len,
5757 "24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
5758 sense[7] >> 4, sense[7] & 0x0f,
5759 sense[1] & 0x10 ? "" : "no");
5760 } else {
5761 /* 32 Byte Sense Data */
5762 sprintf(page + len,
5763 "32 Byte: Format: %x Exception class %x\n",
5764 sense[6] & 0x0f, sense[22] >> 4);
5765 }
5766 } else {
5767 sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
5768 }
5769 } else {
5770 sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n");
5771 }
5772 dev_err(&device->cdev->dev, "%s", page);
5773 free_page((unsigned long) page);
5774 }
5775
5776 static void dasd_eckd_dump_sense(struct dasd_device *device,
5777 struct dasd_ccw_req *req, struct irb *irb)
5778 {
5779 u8 *sense = dasd_get_sense(irb);
5780
5781 /*
5782 * In some cases certain errors might be expected and
5783 * log messages shouldn't be written then.
5784 * Check if the corresponding suppress bit is set.
5785 */
5786 if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
5787 !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
5788 test_bit(DASD_CQR_SUPPRESS_IT, &req->flags))
5789 return;
5790
5791 if (sense && sense[0] & SNS0_CMD_REJECT &&
5792 test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5793 return;
5794
5795 if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5796 test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5797 return;
5798
5799 if (scsw_cstat(&irb->scsw) == 0x40 &&
5800 test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5801 return;
5802
5803 if (scsw_is_tm(&irb->scsw))
5804 dasd_eckd_dump_sense_tcw(device, req, irb);
5805 else
5806 dasd_eckd_dump_sense_ccw(device, req, irb);
5807 }
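/*
 * Illustrative only: how a request builder in this driver opts out of the
 * log messages above, modeled on the DASD_CQR_SUPPRESS_CR usage in
 * dasd_eckd_query_host_access().  A sketch, not a complete request setup:
 *
 *	cqr->buildclk = get_tod_clock();
 *	cqr->status = DASD_CQR_FILLED;
 *	// a 'no record found' condition is expected, keep the log quiet
 *	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
 *	rc = dasd_sleep_on(cqr);
 */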
5808
5809 static int dasd_eckd_reload_device(struct dasd_device *device)
5810 {
5811 struct dasd_eckd_private *private = device->private;
5812 char print_uid[DASD_UID_STRLEN];
5813 int rc, old_base;
5814 struct dasd_uid uid;
5815 unsigned long flags;
5816
5817 /*
5818 * remove device from alias handling to prevent new requests
5819 * from being scheduled on the wrong alias device
5820 */
5821 dasd_alias_remove_device(device);
5822
5823 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5824 old_base = private->uid.base_unit_addr;
5825 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5826
5827 /* Read Configuration Data */
5828 rc = dasd_eckd_read_conf(device);
5829 if (rc)
5830 goto out_err;
5831
5832 dasd_eckd_read_fc_security(device);
5833
5834 rc = dasd_eckd_generate_uid(device);
5835 if (rc)
5836 goto out_err;
5837 /*
5838 * update unit address configuration and
5839 * add device to alias management
5840 */
5841 dasd_alias_update_add_device(device);
5842
5843 dasd_eckd_get_uid(device, &uid);
5844
5845 if (old_base != uid.base_unit_addr) {
5846 dasd_eckd_get_uid_string(&private->conf, print_uid);
5847 dev_info(&device->cdev->dev,
5848 "An Alias device was reassigned to a new base device "
5849 "with UID: %s\n", print_uid);
5850 }
5851 return 0;
5852
5853 out_err:
5854 return -1;
5855 }
5856
5857 static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5858 struct dasd_rssd_messages *messages,
5859 __u8 lpum)
5860 {
5861 struct dasd_rssd_messages *message_buf;
5862 struct dasd_psf_prssd_data *prssdp;
5863 struct dasd_ccw_req *cqr;
5864 struct ccw1 *ccw;
5865 int rc;
5866
5867 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5868 (sizeof(struct dasd_psf_prssd_data) +
5869 sizeof(struct dasd_rssd_messages)),
5870 device, NULL);
5871 if (IS_ERR(cqr)) {
5872 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5873 "Could not allocate read message buffer request");
5874 return PTR_ERR(cqr);
5875 }
5876
5877 cqr->lpm = lpum;
5878 retry:
5879 cqr->startdev = device;
5880 cqr->memdev = device;
5881 cqr->block = NULL;
5882 cqr->expires = 10 * HZ;
5883 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5884 /* dasd_sleep_on_immediatly does not do complex error
5885 * recovery, so clear the erp flag and set the retry
5886 * counter to do basic erp */
5887 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5888 cqr->retries = 256;
5889
5890 /* Prepare for Read Subsystem Data */
5891 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5892 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5893 prssdp->order = PSF_ORDER_PRSSD;
5894 prssdp->suborder = 0x03; /* Message Buffer */
5895 /* all other bytes of prssdp must be zero */
5896
5897 ccw = cqr->cpaddr;
5898 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5899 ccw->count = sizeof(struct dasd_psf_prssd_data);
5900 ccw->flags |= CCW_FLAG_CC;
5901 ccw->flags |= CCW_FLAG_SLI;
5902 ccw->cda = virt_to_dma32(prssdp);
5903
5904 /* Read Subsystem Data - message buffer */
5905 message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
5906 memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
5907
5908 ccw++;
5909 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5910 ccw->count = sizeof(struct dasd_rssd_messages);
5911 ccw->flags |= CCW_FLAG_SLI;
5912 ccw->cda = virt_to_dma32(message_buf);
5913
5914 cqr->buildclk = get_tod_clock();
5915 cqr->status = DASD_CQR_FILLED;
5916 rc = dasd_sleep_on_immediatly(cqr);
5917 if (rc == 0) {
5918 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5919 message_buf = (struct dasd_rssd_messages *)
5920 (prssdp + 1);
5921 memcpy(messages, message_buf,
5922 sizeof(struct dasd_rssd_messages));
5923 } else if (cqr->lpm) {
5924 /*
5925 * on z/VM we might not be able to do I/O on the requested path,
5926 * but we can get the required information on any path,
5927 * so retry with an open path mask
5928 */
5929 cqr->lpm = 0;
5930 goto retry;
5931 } else
5932 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5933 "Reading messages failed with rc=%d\n"
5934 , rc);
5935 dasd_sfree_request(cqr, cqr->memdev);
5936 return rc;
5937 }
5938
5939 static int dasd_eckd_query_host_access(struct dasd_device *device,
5940 struct dasd_psf_query_host_access *data)
5941 {
5942 struct dasd_eckd_private *private = device->private;
5943 struct dasd_psf_query_host_access *host_access;
5944 struct dasd_psf_prssd_data *prssdp;
5945 struct dasd_ccw_req *cqr;
5946 struct ccw1 *ccw;
5947 int rc;
5948
5949 /* not available for HYPER PAV alias devices */
5950 if (!device->block && private->lcu->pav == HYPER_PAV)
5951 return -EOPNOTSUPP;
5952
5953 /* may not be supported by the storage server */
5954 if (!(private->features.feature[14] & 0x80))
5955 return -EOPNOTSUPP;
5956
5957 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5958 sizeof(struct dasd_psf_prssd_data) + 1,
5959 device, NULL);
5960 if (IS_ERR(cqr)) {
5961 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5962 "Could not allocate read message buffer request");
5963 return PTR_ERR(cqr);
5964 }
5965 host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
5966 if (!host_access) {
5967 dasd_sfree_request(cqr, device);
5968 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5969 "Could not allocate host_access buffer");
5970 return -ENOMEM;
5971 }
5972 cqr->startdev = device;
5973 cqr->memdev = device;
5974 cqr->block = NULL;
5975 cqr->retries = 256;
5976 cqr->expires = 10 * HZ;
5977
5978 /* Prepare for Read Subsystem Data */
5979 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5980 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5981 prssdp->order = PSF_ORDER_PRSSD;
5982 prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
5983 /* LSS and Volume that will be queried */
5984 prssdp->lss = private->conf.ned->ID;
5985 prssdp->volume = private->conf.ned->unit_addr;
5986 /* all other bytes of prssdp must be zero */
5987
5988 ccw = cqr->cpaddr;
5989 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5990 ccw->count = sizeof(struct dasd_psf_prssd_data);
5991 ccw->flags |= CCW_FLAG_CC;
5992 ccw->flags |= CCW_FLAG_SLI;
5993 ccw->cda = virt_to_dma32(prssdp);
5994
5995 /* Read Subsystem Data - query host access */
5996 ccw++;
5997 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5998 ccw->count = sizeof(struct dasd_psf_query_host_access);
5999 ccw->flags |= CCW_FLAG_SLI;
6000 ccw->cda = virt_to_dma32(host_access);
6001
6002 cqr->buildclk = get_tod_clock();
6003 cqr->status = DASD_CQR_FILLED;
6004 /* the command might not be supported, suppress error message */
6005 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
6006 rc = dasd_sleep_on_interruptible(cqr);
6007 if (rc == 0) {
6008 *data = *host_access;
6009 } else {
6010 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
6011 "Reading host access data failed with rc=%d\n",
6012 rc);
6013 rc = -EOPNOTSUPP;
6014 }
6015
6016 dasd_sfree_request(cqr, cqr->memdev);
6017 kfree(host_access);
6018 return rc;
6019 }
6020 /*
6021 * return number of grouped devices
6022 */
6023 static int dasd_eckd_host_access_count(struct dasd_device *device)
6024 {
6025 struct dasd_psf_query_host_access *access;
6026 struct dasd_ckd_path_group_entry *entry;
6027 struct dasd_ckd_host_information *info;
6028 int count = 0;
6029 int rc, i;
6030
6031 access = kzalloc(sizeof(*access), GFP_NOIO);
6032 if (!access) {
6033 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6034 "Could not allocate access buffer");
6035 return -ENOMEM;
6036 }
6037 rc = dasd_eckd_query_host_access(device, access);
6038 if (rc) {
6039 kfree(access);
6040 return rc;
6041 }
6042
6043 info = (struct dasd_ckd_host_information *)
6044 access->host_access_information;
6045 for (i = 0; i < info->entry_count; i++) {
6046 entry = (struct dasd_ckd_path_group_entry *)
6047 (info->entry + i * info->entry_size);
6048 if (entry->status_flags & DASD_ECKD_PG_GROUPED)
6049 count++;
6050 }
6051
6052 kfree(access);
6053 return count;
6054 }
6055
6056 /*
6057 * write host access information to a sequential file
6058 */
6059 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
6060 {
6061 struct dasd_psf_query_host_access *access;
6062 struct dasd_ckd_path_group_entry *entry;
6063 struct dasd_ckd_host_information *info;
6064 char sysplex[9] = "";
6065 int rc, i;
6066
6067 access = kzalloc(sizeof(*access), GFP_NOIO);
6068 if (!access) {
6069 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6070 "Could not allocate access buffer");
6071 return -ENOMEM;
6072 }
6073 rc = dasd_eckd_query_host_access(device, access);
6074 if (rc) {
6075 kfree(access);
6076 return rc;
6077 }
6078
6079 info = (struct dasd_ckd_host_information *)
6080 access->host_access_information;
6081 for (i = 0; i < info->entry_count; i++) {
6082 entry = (struct dasd_ckd_path_group_entry *)
6083 (info->entry + i * info->entry_size);
6084 /* PGID */
6085 seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
6086 /* FLAGS */
6087 seq_printf(m, "status_flags %02x\n", entry->status_flags);
6088 /* SYSPLEX NAME */
6089 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
6090 EBCASC(sysplex, sizeof(sysplex));
6091 seq_printf(m, "sysplex_name %8s\n", sysplex);
6092 /* SUPPORTED CYLINDER */
6093 seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
6094 /* TIMESTAMP */
6095 seq_printf(m, "timestamp %lu\n", (unsigned long)
6096 entry->timestamp);
6097 }
6098 kfree(access);
6099
6100 return 0;
6101 }
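/*
 * Illustrative only: one entry of the sequential file as produced by the
 * seq_printf() calls above (all values are made up):
 *
 *	pgid 80034b2c15dd48d8b6d002
 *	status_flags 20
 *	sysplex_name SYSPLEX1
 *	supported_cylinder 65520
 *	timestamp 1700000000
 */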
6102
6103 static struct dasd_device
6104 *copy_relation_find_device(struct dasd_copy_relation *copy,
6105 char *busid)
6106 {
6107 int i;
6108
6109 for (i = 0; i < DASD_CP_ENTRIES; i++) {
6110 if (copy->entry[i].configured &&
6111 strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
6112 return copy->entry[i].device;
6113 }
6114 return NULL;
6115 }
6116
6117 /*
6118 * set the new active/primary device
6119 */
6120 static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
6121 char *old_busid)
6122 {
6123 int i;
6124
6125 for (i = 0; i < DASD_CP_ENTRIES; i++) {
6126 if (copy->entry[i].configured &&
6127 strncmp(copy->entry[i].busid, new_busid,
6128 DASD_BUS_ID_SIZE) == 0) {
6129 copy->active = &copy->entry[i];
6130 copy->entry[i].primary = true;
6131 } else if (copy->entry[i].configured &&
6132 strncmp(copy->entry[i].busid, old_busid,
6133 DASD_BUS_ID_SIZE) == 0) {
6134 copy->entry[i].primary = false;
6135 }
6136 }
6137 }
6138
6139 /*
6140 * The function will swap the role of a given copy pair.
6141 * During the swap operation the relation of the blockdevice is disconnected
6142 * from the old primary and connected to the new.
6143 *
6144 * IO is paused on the block queue before swap and may be resumed afterwards.
6145 */
6146 static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
6147 char *sec_busid)
6148 {
6149 struct dasd_device *primary, *secondary;
6150 struct dasd_copy_relation *copy;
6151 struct dasd_block *block;
6152 struct gendisk *gdp;
6153
6154 copy = device->copy;
6155 if (!copy)
6156 return DASD_COPYPAIRSWAP_INVALID;
6157 primary = copy->active->device;
6158 if (!primary)
6159 return DASD_COPYPAIRSWAP_INVALID;
6160 /* double check if swap has correct primary */
6161 if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
6162 return DASD_COPYPAIRSWAP_PRIMARY;
6163
6164 secondary = copy_relation_find_device(copy, sec_busid);
6165 if (!secondary)
6166 return DASD_COPYPAIRSWAP_SECONDARY;
6167
6168 /*
6169 * usually the device should already be quiesced for the swap;
6170 * stop the devices and requeue all requests again as a precaution
6171 */
6172 dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
6173 dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
6174 dasd_generic_requeue_all_requests(primary);
6175
6176 /* swap DASD internal device <> block assignment */
6177 block = primary->block;
6178 primary->block = NULL;
6179 secondary->block = block;
6180 block->base = secondary;
6181 /* set new primary device in COPY relation */
6182 copy_pair_set_active(copy, sec_busid, prim_busid);
6183
6184 /* swap blocklayer device link */
6185 gdp = block->gdp;
6186 dasd_add_link_to_gendisk(gdp, secondary);
6187
6188 /* re-enable device */
6189 dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
6190 dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
6191 dasd_schedule_device_bh(secondary);
6192
6193 return DASD_COPYPAIRSWAP_SUCCESS;
6194 }
6195
6196 /*
6197 * Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
6198 */
6199 static int dasd_eckd_query_pprc_status(struct dasd_device *device,
6200 struct dasd_pprc_data_sc4 *data)
6201 {
6202 struct dasd_pprc_data_sc4 *pprc_data;
6203 struct dasd_psf_prssd_data *prssdp;
6204 struct dasd_ccw_req *cqr;
6205 struct ccw1 *ccw;
6206 int rc;
6207
6208 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
6209 sizeof(*prssdp) + sizeof(*pprc_data) + 1,
6210 device, NULL);
6211 if (IS_ERR(cqr)) {
6212 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6213 "Could not allocate query PPRC status request");
6214 return PTR_ERR(cqr);
6215 }
6216 cqr->startdev = device;
6217 cqr->memdev = device;
6218 cqr->block = NULL;
6219 cqr->retries = 256;
6220 cqr->expires = 10 * HZ;
6221
6222 /* Prepare for Read Subsystem Data */
6223 prssdp = (struct dasd_psf_prssd_data *)cqr->data;
6224 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
6225 prssdp->order = PSF_ORDER_PRSSD;
6226 prssdp->suborder = PSF_SUBORDER_PPRCEQ;
6227 prssdp->varies[0] = PPRCEQ_SCOPE_4;
6228 pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);
6229
6230 ccw = cqr->cpaddr;
6231 ccw->cmd_code = DASD_ECKD_CCW_PSF;
6232 ccw->count = sizeof(struct dasd_psf_prssd_data);
6233 ccw->flags |= CCW_FLAG_CC;
6234 ccw->flags |= CCW_FLAG_SLI;
6235 ccw->cda = virt_to_dma32(prssdp);
6236
6237 /* Read Subsystem Data - PPRC Extended Query */
6238 ccw++;
6239 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
6240 ccw->count = sizeof(*pprc_data);
6241 ccw->flags |= CCW_FLAG_SLI;
6242 ccw->cda = virt_to_dma32(pprc_data);
6243
6244 cqr->buildclk = get_tod_clock();
6245 cqr->status = DASD_CQR_FILLED;
6246
6247 rc = dasd_sleep_on_interruptible(cqr);
6248 if (rc == 0) {
6249 *data = *pprc_data;
6250 } else {
6251 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
6252 "PPRC Extended Query failed with rc=%d\n",
6253 rc);
6254 rc = -EOPNOTSUPP;
6255 }
6256
6257 dasd_sfree_request(cqr, cqr->memdev);
6258 return rc;
6259 }
6260
6261 /*
6262 * ECKD NOP - no operation
6263 */
6264 static int dasd_eckd_nop(struct dasd_device *device)
6265 {
6266 struct dasd_ccw_req *cqr;
6267 struct ccw1 *ccw;
6268 int rc;
6269
6270 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
6271 if (IS_ERR(cqr)) {
6272 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6273 "Could not allocate NOP request");
6274 return PTR_ERR(cqr);
6275 }
6276 cqr->startdev = device;
6277 cqr->memdev = device;
6278 cqr->block = NULL;
6279 cqr->retries = 1;
6280 cqr->expires = 10 * HZ;
6281
6282 ccw = cqr->cpaddr;
6283 ccw->cmd_code = DASD_ECKD_CCW_NOP;
6284 ccw->flags |= CCW_FLAG_SLI;
6285
6286 cqr->buildclk = get_tod_clock();
6287 cqr->status = DASD_CQR_FILLED;
6288
6289 rc = dasd_sleep_on_interruptible(cqr);
6290 if (rc != 0) {
6291 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
6292 "NOP failed with rc=%d\n", rc);
6293 rc = -EOPNOTSUPP;
6294 }
6295 dasd_sfree_request(cqr, cqr->memdev);
6296 return rc;
6297 }
6298
6299 static int dasd_eckd_device_ping(struct dasd_device *device)
6300 {
6301 return dasd_eckd_nop(device);
6302 }
6303
6304 /*
6305 * Perform Subsystem Function - CUIR response
6306 */
6307 static int
6308 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
6309 __u32 message_id, __u8 lpum)
6310 {
6311 struct dasd_psf_cuir_response *psf_cuir;
6312 int pos = pathmask_to_pos(lpum);
6313 struct dasd_ccw_req *cqr;
6314 struct ccw1 *ccw;
6315 int rc;
6316
6317 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
6318 sizeof(struct dasd_psf_cuir_response),
6319 device, NULL);
6320
6321 if (IS_ERR(cqr)) {
6322 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6323 "Could not allocate PSF-CUIR request");
6324 return PTR_ERR(cqr);
6325 }
6326
6327 psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
6328 psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
6329 psf_cuir->cc = response;
6330 psf_cuir->chpid = device->path[pos].chpid;
6331 psf_cuir->message_id = message_id;
6332 psf_cuir->cssid = device->path[pos].cssid;
6333 psf_cuir->ssid = device->path[pos].ssid;
6334 ccw = cqr->cpaddr;
6335 ccw->cmd_code = DASD_ECKD_CCW_PSF;
6336 ccw->cda = virt_to_dma32(psf_cuir);
6337 ccw->flags = CCW_FLAG_SLI;
6338 ccw->count = sizeof(struct dasd_psf_cuir_response);
6339
6340 cqr->startdev = device;
6341 cqr->memdev = device;
6342 cqr->block = NULL;
6343 cqr->retries = 256;
6344 cqr->expires = 10 * HZ;
6345 cqr->buildclk = get_tod_clock();
6346 cqr->status = DASD_CQR_FILLED;
6347 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6348
6349 rc = dasd_sleep_on(cqr);
6350
6351 dasd_sfree_request(cqr, cqr->memdev);
6352 return rc;
6353 }
6354
6355 /*
6356 * return the configuration data referenced by the record selector
6357 * if a record selector is specified; otherwise return the
6358 * conf_data pointer for the path specified by lpum
6359 */
6360 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6361 __u8 lpum,
6362 struct dasd_cuir_message *cuir)
6363 {
6364 struct dasd_conf_data *conf_data;
6365 int path, pos;
6366
6367 if (cuir->record_selector == 0)
6368 goto out;
6369 for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6370 conf_data = device->path[pos].conf_data;
6371 if (conf_data->gneq.record_selector ==
6372 cuir->record_selector)
6373 return conf_data;
6374 }
6375 out:
6376 return device->path[pathmask_to_pos(lpum)].conf_data;
6377 }
6378
6379 /*
6380 * This function determines the scope of a reconfiguration request by
6381 * analysing the path and device selection data provided in the CUIR request.
6382 * Returns a path mask containing the CUIR-affected paths for the given device.
6383 *
6384 * If the CUIR request does not contain the required information, return the
6385 * path mask of the path on which the attention message for the CUIR request
6386 * was received.
6387 */
6388 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
6389 struct dasd_cuir_message *cuir)
6390 {
6391 struct dasd_conf_data *ref_conf_data;
6392 unsigned long bitmask = 0, mask = 0;
6393 struct dasd_conf_data *conf_data;
6394 unsigned int pos, path;
6395 char *ref_gneq, *gneq;
6396 char *ref_ned, *ned;
6397 int tbcpm = 0;
6398
6399 /* if CUIR request does not specify the scope use the path
6400 the attention message was presented on */
6401 if (!cuir->ned_map ||
6402 !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
6403 return lpum;
6404
6405 /* get reference conf data */
6406 ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
6407 /* reference ned is determined by ned_map field */
6408 pos = 8 - ffs(cuir->ned_map);
6409 ref_ned = (char *)&ref_conf_data->neds[pos];
6410 ref_gneq = (char *)&ref_conf_data->gneq;
6411 /* transfer 24 bit neq_map to mask */
6412 mask = cuir->neq_map[2];
6413 mask |= cuir->neq_map[1] << 8;
6414 mask |= cuir->neq_map[0] << 16;
6415
6416 for (path = 0; path < 8; path++) {
6417 /* initialise data per path */
6418 bitmask = mask;
6419 conf_data = device->path[path].conf_data;
6420 pos = 8 - ffs(cuir->ned_map);
6421 ned = (char *) &conf_data->neds[pos];
6422 /* compare reference ned and per path ned */
6423 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
6424 continue;
6425 gneq = (char *)&conf_data->gneq;
6426 /* compare reference gneq and per path gneq under the
6427 24 bit mask, where mask bit 0 corresponds to byte 31
6428 of the gneq and mask bit 23 to byte 8 */
6429 while (bitmask) {
6430 pos = ffs(bitmask) - 1;
6431 if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
6432 != 0)
6433 break;
6434 clear_bit(pos, &bitmask);
6435 }
6436 if (bitmask)
6437 continue;
6438 /* device and path match the reference values
6439 add path to CUIR scope */
6440 tbcpm |= 0x80 >> path;
6441 }
6442 return tbcpm;
6443 }
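/*
 * Worked example for the masking above: a CUIR request with ned_map 0x20
 * selects NED 2 as the reference (8 - ffs(0x20) = 2).  A neq_map of
 * { 0x80, 0x00, 0x01 } becomes mask 0x800001, so bits 0 and 23 are set
 * and bytes 31 and 8 of each path's gneq are compared against the
 * reference gneq.
 */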
6444
6445 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6446 unsigned long paths, int action)
6447 {
6448 int pos;
6449
6450 while (paths) {
6451 /* get position of bit in mask */
6452 pos = 8 - ffs(paths);
6453 /* get channel path descriptor from this position */
6454 if (action == CUIR_QUIESCE)
6455 pr_warn("Service on the storage server caused path %x.%02x to go offline",
6456 device->path[pos].cssid,
6457 device->path[pos].chpid);
6458 else if (action == CUIR_RESUME)
6459 pr_info("Path %x.%02x is back online after service on the storage server",
6460 device->path[pos].cssid,
6461 device->path[pos].chpid);
6462 clear_bit(7 - pos, &paths);
6463 }
6464 }
6465
6466 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
6467 struct dasd_cuir_message *cuir)
6468 {
6469 unsigned long tbcpm;
6470
6471 tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
6472 /* nothing to do if path is not in use */
6473 if (!(dasd_path_get_opm(device) & tbcpm))
6474 return 0;
6475 if (!(dasd_path_get_opm(device) & ~tbcpm)) {
6476 /* no path would be left if the CUIR action is taken
6477 return error */
6478 return -EINVAL;
6479 }
6480 /* remove device from operational path mask */
6481 dasd_path_remove_opm(device, tbcpm);
6482 dasd_path_add_cuirpm(device, tbcpm);
6483 return tbcpm;
6484 }
6485
6486 /*
6487 * walk through all devices and build a path mask to quiesce them
6488 * return an error if the last path to a device would be removed
6489 *
6490 * if an error occurs after only part of the devices were
6491 * quiesced, no onlining is necessary; the storage server
6492 * will notify the already offline devices again
6493 */
6494 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
6495 struct dasd_cuir_message *cuir)
6496 {
6497 struct dasd_eckd_private *private = device->private;
6498 struct alias_pav_group *pavgroup, *tempgroup;
6499 struct dasd_device *dev, *n;
6500 unsigned long paths = 0;
6501 unsigned long flags;
6502 int tbcpm;
6503
6504 /* active devices */
6505 list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6506 alias_list) {
6507 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6508 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6509 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6510 if (tbcpm < 0)
6511 goto out_err;
6512 paths |= tbcpm;
6513 }
6514 /* inactive devices */
6515 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6516 alias_list) {
6517 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6518 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6519 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6520 if (tbcpm < 0)
6521 goto out_err;
6522 paths |= tbcpm;
6523 }
6524 /* devices in PAV groups */
6525 list_for_each_entry_safe(pavgroup, tempgroup,
6526 &private->lcu->grouplist, group) {
6527 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6528 alias_list) {
6529 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6530 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6531 spin_unlock_irqrestore(
6532 get_ccwdev_lock(dev->cdev), flags);
6533 if (tbcpm < 0)
6534 goto out_err;
6535 paths |= tbcpm;
6536 }
6537 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6538 alias_list) {
6539 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6540 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6541 spin_unlock_irqrestore(
6542 get_ccwdev_lock(dev->cdev), flags);
6543 if (tbcpm < 0)
6544 goto out_err;
6545 paths |= tbcpm;
6546 }
6547 }
6548 /* notify user about all paths affected by CUIR action */
6549 dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
6550 return 0;
6551 out_err:
6552 return tbcpm;
6553 }
6554
6555 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
6556 struct dasd_cuir_message *cuir)
6557 {
6558 struct dasd_eckd_private *private = device->private;
6559 struct alias_pav_group *pavgroup, *tempgroup;
6560 struct dasd_device *dev, *n;
6561 unsigned long paths = 0;
6562 int tbcpm;
6563
6564 /*
6565 * the path may have been added through a generic path event before
6566 * only trigger path verification if the path is not already in use
6567 */
6568 list_for_each_entry_safe(dev, n,
6569 &private->lcu->active_devices,
6570 alias_list) {
6571 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6572 paths |= tbcpm;
6573 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6574 dasd_path_add_tbvpm(dev, tbcpm);
6575 dasd_schedule_device_bh(dev);
6576 }
6577 }
6578 list_for_each_entry_safe(dev, n,
6579 &private->lcu->inactive_devices,
6580 alias_list) {
6581 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6582 paths |= tbcpm;
6583 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6584 dasd_path_add_tbvpm(dev, tbcpm);
6585 dasd_schedule_device_bh(dev);
6586 }
6587 }
6588 /* devices in PAV groups */
6589 list_for_each_entry_safe(pavgroup, tempgroup,
6590 &private->lcu->grouplist,
6591 group) {
6592 list_for_each_entry_safe(dev, n,
6593 &pavgroup->baselist,
6594 alias_list) {
6595 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6596 paths |= tbcpm;
6597 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6598 dasd_path_add_tbvpm(dev, tbcpm);
6599 dasd_schedule_device_bh(dev);
6600 }
6601 }
6602 list_for_each_entry_safe(dev, n,
6603 &pavgroup->aliaslist,
6604 alias_list) {
6605 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6606 paths |= tbcpm;
6607 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6608 dasd_path_add_tbvpm(dev, tbcpm);
6609 dasd_schedule_device_bh(dev);
6610 }
6611 }
6612 }
6613 /* notify user about all paths affected by CUIR action */
6614 dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
6615 return 0;
6616 }
6617
6618 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
6619 __u8 lpum)
6620 {
6621 struct dasd_cuir_message *cuir = messages;
6622 int response;
6623
6624 DBF_DEV_EVENT(DBF_WARNING, device,
6625 "CUIR request: %016llx %016llx %016llx %08x",
6626 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
6627 ((u32 *)cuir)[3]);
6628
6629 if (cuir->code == CUIR_QUIESCE) {
6630 /* quiesce */
6631 if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
6632 response = PSF_CUIR_LAST_PATH;
6633 else
6634 response = PSF_CUIR_COMPLETED;
6635 } else if (cuir->code == CUIR_RESUME) {
6636 /* resume */
6637 dasd_eckd_cuir_resume(device, lpum, cuir);
6638 response = PSF_CUIR_COMPLETED;
6639 } else
6640 response = PSF_CUIR_NOT_SUPPORTED;
6641
6642 dasd_eckd_psf_cuir_response(device, response,
6643 cuir->message_id, lpum);
6644 DBF_DEV_EVENT(DBF_WARNING, device,
6645 "CUIR response: %d on message ID %08x", response,
6646 cuir->message_id);
6647 /* to make sure there is no attention left, schedule work again */
6648 device->discipline->check_attention(device, lpum);
6649 }
6650
6651 static void dasd_eckd_oos_resume(struct dasd_device *device)
6652 {
6653 struct dasd_eckd_private *private = device->private;
6654 struct alias_pav_group *pavgroup, *tempgroup;
6655 struct dasd_device *dev, *n;
6656 unsigned long flags;
6657
6658 spin_lock_irqsave(&private->lcu->lock, flags);
6659 list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6660 alias_list) {
6661 if (dev->stopped & DASD_STOPPED_NOSPC)
6662 dasd_generic_space_avail(dev);
6663 }
6664 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6665 alias_list) {
6666 if (dev->stopped & DASD_STOPPED_NOSPC)
6667 dasd_generic_space_avail(dev);
6668 }
6669 /* devices in PAV groups */
6670 list_for_each_entry_safe(pavgroup, tempgroup,
6671 &private->lcu->grouplist,
6672 group) {
6673 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6674 alias_list) {
6675 if (dev->stopped & DASD_STOPPED_NOSPC)
6676 dasd_generic_space_avail(dev);
6677 }
6678 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6679 alias_list) {
6680 if (dev->stopped & DASD_STOPPED_NOSPC)
6681 dasd_generic_space_avail(dev);
6682 }
6683 }
6684 spin_unlock_irqrestore(&private->lcu->lock, flags);
6685 }
6686
6687 static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
6688 __u8 lpum)
6689 {
6690 struct dasd_oos_message *oos = messages;
6691
6692 switch (oos->code) {
6693 case REPO_WARN:
6694 case POOL_WARN:
6695 dev_warn(&device->cdev->dev,
6696 "Extent pool usage has reached a critical value\n");
6697 dasd_eckd_oos_resume(device);
6698 break;
6699 case REPO_EXHAUST:
6700 case POOL_EXHAUST:
6701 dev_warn(&device->cdev->dev,
6702 "Extent pool is exhausted\n");
6703 break;
6704 case REPO_RELIEVE:
6705 case POOL_RELIEVE:
6706 dev_info(&device->cdev->dev,
6707 "Extent pool physical space constraint has been relieved\n");
6708 break;
6709 }
6710
6711 /* In any case, update related data */
6712 dasd_eckd_read_ext_pool_info(device);
6713
6714 /* to make sure there is no attention left, schedule work again */
6715 device->discipline->check_attention(device, lpum);
6716 }
6717
6718 static void dasd_eckd_check_attention_work(struct work_struct *work)
6719 {
6720 struct check_attention_work_data *data;
6721 struct dasd_rssd_messages *messages;
6722 struct dasd_device *device;
6723 int rc;
6724
6725 data = container_of(work, struct check_attention_work_data, worker);
6726 device = data->device;
6727 messages = kzalloc(sizeof(*messages), GFP_KERNEL);
6728 if (!messages) {
6729 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6730 "Could not allocate attention message buffer");
6731 goto out;
6732 }
6733 rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
6734 if (rc)
6735 goto out;
6736
6737 if (messages->length == ATTENTION_LENGTH_CUIR &&
6738 messages->format == ATTENTION_FORMAT_CUIR)
6739 dasd_eckd_handle_cuir(device, messages, data->lpum);
6740 if (messages->length == ATTENTION_LENGTH_OOS &&
6741 messages->format == ATTENTION_FORMAT_OOS)
6742 dasd_eckd_handle_oos(device, messages, data->lpum);
6743
6744 out:
6745 dasd_put_device(device);
6746 kfree(messages);
6747 kfree(data);
6748 }
6749
6750 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6751 {
6752 struct check_attention_work_data *data;
6753
6754 data = kzalloc(sizeof(*data), GFP_ATOMIC);
6755 if (!data)
6756 return -ENOMEM;
6757 INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6758 dasd_get_device(device);
6759 data->device = device;
6760 data->lpum = lpum;
6761 schedule_work(&data->worker);
6762 return 0;
6763 }
6764
6765 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6766 {
6767 if (~lpum & dasd_path_get_opm(device)) {
6768 dasd_path_add_nohpfpm(device, lpum);
6769 dasd_path_remove_opm(device, lpum);
6770 dev_err(&device->cdev->dev,
6771 "Channel path %02X lost HPF functionality and is disabled\n",
6772 lpum);
6773 return 1;
6774 }
6775 return 0;
6776 }
6777
6778 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6779 {
6780 struct dasd_eckd_private *private = device->private;
6781
6782 dev_err(&device->cdev->dev,
6783 "High Performance FICON disabled\n");
6784 private->fcx_max_data = 0;
6785 }
6786
6787 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6788 {
6789 struct dasd_eckd_private *private = device->private;
6790
6791 return private->fcx_max_data ? 1 : 0;
6792 }
6793
6794 static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
6795 struct irb *irb)
6796 {
6797 struct dasd_eckd_private *private = device->private;
6798
6799 if (!private->fcx_max_data) {
6800 /* sanity check for no HPF, the error makes no sense */
6801 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6802 "Trying to disable HPF for a non HPF device");
6803 return;
6804 }
6805 if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
6806 dasd_eckd_disable_hpf_device(device);
6807 } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
6808 if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
6809 return;
6810 dasd_eckd_disable_hpf_device(device);
6811 dasd_path_set_tbvpm(device,
6812 dasd_path_get_hpfpm(device));
6813 }
6814 /*
6815 * prevent any new I/O from being started on the device and schedule a
6816 * requeue of existing requests
6817 */
6818 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
6819 dasd_schedule_requeue(device);
6820 }
6821
6822 static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
6823 {
6824 if (block->base->features & DASD_FEATURE_USERAW) {
6825 /*
6826 * the max_blocks value for raw_track access is 256
6827 * it is higher than the native ECKD value because we
6828 * only need one ccw per track
6829 * so the max_hw_sectors are
6830 * 2048 x 512B = 1024kB = 16 tracks
6831 */
6832 return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
6833 }
6834
6835 return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
6836 }
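/*
 * Worked numbers for the raw case above: with 4 KiB blocks s2b_shift is 3,
 * so 256 << 3 = 2048 sectors of 512 bytes, i.e. the 1024 kB (16 raw tracks)
 * mentioned in the comment.
 */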
6837
6838 static struct ccw_driver dasd_eckd_driver = {
6839 .driver = {
6840 .name = "dasd-eckd",
6841 .owner = THIS_MODULE,
6842 .dev_groups = dasd_dev_groups,
6843 },
6844 .ids = dasd_eckd_ids,
6845 .probe = dasd_eckd_probe,
6846 .remove = dasd_generic_remove,
6847 .set_offline = dasd_generic_set_offline,
6848 .set_online = dasd_eckd_set_online,
6849 .notify = dasd_generic_notify,
6850 .path_event = dasd_generic_path_event,
6851 .shutdown = dasd_generic_shutdown,
6852 .uc_handler = dasd_generic_uc_handler,
6853 .int_class = IRQIO_DAS,
6854 };
6855
6856 static struct dasd_discipline dasd_eckd_discipline = {
6857 .owner = THIS_MODULE,
6858 .name = "ECKD",
6859 .ebcname = "ECKD",
6860 .check_device = dasd_eckd_check_characteristics,
6861 .uncheck_device = dasd_eckd_uncheck_device,
6862 .do_analysis = dasd_eckd_do_analysis,
6863 .pe_handler = dasd_eckd_pe_handler,
6864 .basic_to_ready = dasd_eckd_basic_to_ready,
6865 .online_to_ready = dasd_eckd_online_to_ready,
6866 .basic_to_known = dasd_eckd_basic_to_known,
6867 .max_sectors = dasd_eckd_max_sectors,
6868 .fill_geometry = dasd_eckd_fill_geometry,
6869 .start_IO = dasd_start_IO,
6870 .term_IO = dasd_term_IO,
6871 .handle_terminated_request = dasd_eckd_handle_terminated_request,
6872 .format_device = dasd_eckd_format_device,
6873 .check_device_format = dasd_eckd_check_device_format,
6874 .erp_action = dasd_eckd_erp_action,
6875 .erp_postaction = dasd_eckd_erp_postaction,
6876 .check_for_device_change = dasd_eckd_check_for_device_change,
6877 .build_cp = dasd_eckd_build_alias_cp,
6878 .free_cp = dasd_eckd_free_alias_cp,
6879 .dump_sense = dasd_eckd_dump_sense,
6880 .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
6881 .fill_info = dasd_eckd_fill_info,
6882 .ioctl = dasd_eckd_ioctl,
6883 .reload = dasd_eckd_reload_device,
6884 .get_uid = dasd_eckd_get_uid,
6885 .kick_validate = dasd_eckd_kick_validate_server,
6886 .check_attention = dasd_eckd_check_attention,
6887 .host_access_count = dasd_eckd_host_access_count,
6888 .hosts_print = dasd_hosts_print,
6889 .handle_hpf_error = dasd_eckd_handle_hpf_error,
6890 .disable_hpf = dasd_eckd_disable_hpf_device,
6891 .hpf_enabled = dasd_eckd_hpf_enabled,
6892 .reset_path = dasd_eckd_reset_path,
6893 .is_ese = dasd_eckd_is_ese,
6894 .space_allocated = dasd_eckd_space_allocated,
6895 .space_configured = dasd_eckd_space_configured,
6896 .logical_capacity = dasd_eckd_logical_capacity,
6897 .release_space = dasd_eckd_release_space,
6898 .ext_pool_id = dasd_eckd_ext_pool_id,
6899 .ext_size = dasd_eckd_ext_size,
6900 .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
6901 .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
6902 .ext_pool_oos = dasd_eckd_ext_pool_oos,
6903 .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
6904 .ese_format = dasd_eckd_ese_format,
6905 .ese_read = dasd_eckd_ese_read,
6906 .pprc_status = dasd_eckd_query_pprc_status,
6907 .pprc_enabled = dasd_eckd_pprc_enabled,
6908 .copy_pair_swap = dasd_eckd_copy_pair_swap,
6909 .device_ping = dasd_eckd_device_ping,
6910 };
6911
6912 static int __init
6913 dasd_eckd_init(void)
6914 {
6915 int ret;
6916
6917 ASCEBC(dasd_eckd_discipline.ebcname, 4);
6918 dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6919 GFP_KERNEL | GFP_DMA);
6920 if (!dasd_reserve_req)
6921 return -ENOMEM;
6922 dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6923 GFP_KERNEL | GFP_DMA);
6924 if (!dasd_vol_info_req) {
6925 kfree(dasd_reserve_req);
6926 return -ENOMEM;
6927 }
6928 pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
6929 GFP_KERNEL | GFP_DMA);
6930 if (!pe_handler_worker) {
6931 kfree(dasd_reserve_req);
6932 kfree(dasd_vol_info_req);
6933 return -ENOMEM;
6934 }
6935 rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6936 if (!rawpadpage) {
6937 kfree(pe_handler_worker);
6938 kfree(dasd_reserve_req);
6939 kfree(dasd_vol_info_req);
6940 return -ENOMEM;
6941 }
6942 ret = ccw_driver_register(&dasd_eckd_driver);
6943 if (!ret)
6944 wait_for_device_probe();
6945 else {
6946 kfree(pe_handler_worker);
6947 kfree(dasd_reserve_req);
6948 kfree(dasd_vol_info_req);
6949 free_page((unsigned long)rawpadpage);
6950 }
6951 return ret;
6952 }
6953
6954 static void __exit
6955 dasd_eckd_cleanup(void)
6956 {
6957 ccw_driver_unregister(&dasd_eckd_driver);
6958 kfree(pe_handler_worker);
6959 kfree(dasd_reserve_req);
/* dasd_vol_info_req is allocated in dasd_eckd_init() and must be freed too */
kfree(dasd_vol_info_req);
6960 free_page((unsigned long)rawpadpage);
6961 }
6962
6963 module_init(dasd_eckd_init);
6964 module_exit(dasd_eckd_cleanup);
6965