// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright IBM Corp. 2001, 2018
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
 */

#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

unsigned int zcrypt_mempool_threshold = 5;
module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440);
MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/*
 * Process a rescan of the transport layer.
 * Runs a synchronous AP bus rescan.
 * Returns true if something has changed (for example the
 * bus scan has found and built up new devices) and it is
 * worth doing a retry. Otherwise false is returned, meaning
 * no changes on the AP bus level.
 */
static inline bool zcrypt_process_rescan(void)
{
	return ap_bus_force_rescan();
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if (zops->variant == variant &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
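/*
 * Illustrative sketch (not part of the original file): a card driver
 * resolves the ops of a registered message type via zcrypt_msgtype().
 * The MSGTYPE50 constants come from the zcrypt_msgtype50.h header
 * included above; the function name is made up for this example.
 */
static struct zcrypt_ops * __maybe_unused example_resolve_msgtype50(void)
{
	/* returns NULL if the msgtype module has not registered its ops */
	return zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
}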
/*
 * Multi device nodes extension functions.
 */

struct zcdn_device;

static void zcdn_device_release(struct device *dev);
static const struct class zcrypt_class = {
	.name = ZCRYPT_NAME,
	.dev_release = zcdn_device_release,
};
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

#define ZCDN_MAX_NAME 32

static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);

/*
 * Find zcdn device by name.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
	struct device *dev = class_find_device_by_name(&zcrypt_class, name);

	return dev ? to_zcdn_dev(dev) : NULL;
}

/*
 * Find zcdn device by devt value.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);

	return dev ? to_zcdn_dev(dev) : NULL;
}

static ssize_t ioctlmask_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t ioctlmask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
			       AP_IOCTLS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(ioctlmask);

static ssize_t apmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t apmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
			       AP_DEVICES, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(apmask);
static ssize_t aqmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t aqmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(aqmask);

static ssize_t admask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t admask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(admask);

static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	&dev_attr_admask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};
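/*
 * Illustrative note (not from the original source): the mask attributes
 * above are parsed by ap_parse_mask_str(), which accepts an absolute
 * bitmask given as a "0x"-prefixed hex string, e.g. for apmask
 *
 *	echo 0x8000000000000000000000000000000000000000000000000000000000000000 > apmask
 *
 * (leftmost bit = AP card 0), and - following the AP bus mask syntax -
 * a relative form such as "+0-15,-32" that sets or clears individual
 * bits or bit ranges. The exact grammar accepted is the one implemented
 * by ap_parse_mask_str() itself.
 */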
static ssize_t zcdn_create_store(const struct class *class,
				 const struct class_attribute *attr,
				 const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_create(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_create =
	__ATTR(create, 0600, NULL, zcdn_create_store);

static ssize_t zcdn_destroy_store(const struct class *class,
				  const struct class_attribute *attr,
				  const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_destroy(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_destroy =
	__ATTR(destroy, 0600, NULL, zcdn_destroy_store);

static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
			__func__, MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}

static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = &zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		rc = dev_set_name(&zcdndev->device, "%s", name);
	else
		rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
	if (rc) {
		kfree(zcdndev);
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
			__func__, MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_perms_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_perms_mutex);
}
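/*
 * Illustrative sketch (not part of the original file): zcdn nodes are
 * normally created from userspace by writing a node name to the class
 * attribute /sys/class/zcrypt/create and removed via .../destroy; the
 * in-kernel equivalent boils down to a zcdn_create()/zcdn_destroy()
 * pair. Function and node name below are made up for this example.
 */
static int __maybe_unused example_make_zcdn_node(void)
{
	/* creates /dev/my_zcdn with its own ioctl/ap/aq/ad permission masks */
	return zcdn_create("my_zcdn");
}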
/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	struct ap_perms *perms = &ap_perms;

	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_perms_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev)
			perms = &zcdndev->perms;
	}
	filp->private_data = (void *)perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}

/*
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_perms_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}

	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline int zcrypt_check_ioctl(struct ap_perms *perms,
				     unsigned int cmd)
{
	int rc = -EPERM;
	int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;

	if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
		if (test_bit_inv(ioctlnr, perms->ioctlm))
			rc = 0;
	}

	if (rc)
		ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
				__func__, ioctlnr, rc);

	return rc;
}

static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
	return test_bit_inv(card, perms->apm) ? true : false;
}
static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
	return test_bit_inv(queue, perms->aqm) ? true : false;
}

static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
		return NULL;
	zcrypt_card_get(zc);
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	*pmod = zq->queue->ap_dev.device.driver->owner;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	zcrypt_card_put(zc);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return true;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic64_read(&zc->card->total_request_count) <
			atomic64_read(&pref_zc->card->total_request_count);
	return weight < pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return true;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count <
			pref_zq->queue->total_request_count;
	return weight < pref_weight;
}
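/*
 * Worked example (illustrative numbers, not from the source): assume a
 * request whose weight index is wgt = 10 on two candidate cards. Card A
 * currently carries load 30, card B load 25. zcrypt_card_compare() then
 * weighs 10 + 30 = 40 against 10 + 25 = 35, so card B becomes the
 * preferred card. Only if both sums are equal does the smaller lifetime
 * total_request_count decide the tie. The same scheme - plus the
 * cpen/qpen retry penalties added by the callers below - is applied per
 * queue by zcrypt_queue_compare().
 */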
/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct zcrypt_track *tr,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct zcrypt_track *tr,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms,
			      struct zcrypt_track *tr,
			      struct ica_xcRB *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	unsigned short *domain, tdom;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);

	xcrb->status = 0;

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	tdom = *domain;
	if (perms != &ap_perms && tdom < AP_DOMAINS) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(tdom, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, autoselect target domain.
	 */
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom))
		tdom = AUTOSEL_DOM;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* Check for user selected CCA card */
		if (xcrb->user_defined != AUTOSELECT &&
		    xcrb->user_defined != zc->card->id)
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check for device usable and eligible */
			if (!zq->online || !zq->ops->send_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (tdom != AUTOSEL_DOM &&
			     tdom != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no match for address %02x.%04x => ENODEV\n",
			 xcrb->user_defined, *domain);
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == AUTOSEL_DOM)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);

	while (target_num-- > 0) {
		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
			return true;
		targets++;
	}
	return false;
}
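/*
 * Illustrative note (not from the original source): an ep11_urb with
 * targets_num == 0 addresses all available EP11 APQNs. A single target
 * entry { .ap_id = 5, .dom_id = AUTOSEL_DOM } restricts the request to
 * card 5 with any usage domain, while { .ap_id = AUTOSEL_AP, .dom_id = 13 }
 * allows domain 13 on any card - exactly the matching implemented by the
 * two predicates above.
 */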
static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms,
				   struct zcrypt_track *tr,
				   struct ep11_urb *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets = NULL;
	unsigned short target_num;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0, domain;
	struct ap_message ap_msg;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	target_num = (unsigned short)xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	rc = -ENOMEM;
	if (target_num != 0) {
		if (userspace) {
			targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
			if (!targets)
				goto out;
			if (copy_from_user(targets, xcrb->targets,
					   target_num * sizeof(*targets))) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			targets = (struct ep11_target_dev __force __kernel *)xcrb->targets;
		}
	}

	rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	if (perms != &ap_perms && domain < AUTOSEL_DOM) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(domain, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable EP11 card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.ep11)
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->send_ep11_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		if (targets && target_num == 1) {
			pr_debug("no match for address %02x.%04x => ENODEV\n",
				 (int)targets->ap_id, (int)targets->dom_id);
		} else if (targets) {
			pr_debug("no match for %d target addrs => ENODEV\n",
				 (int)target_num);
		} else {
			pr_debug("no match for address ff.ffff => ENODEV\n");
		}
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	if (userspace)
		kfree(targets);
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;
	rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rng ||
			    !ap_queue_usable(zq->queue))
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
				   int maxcard, int maxqueue)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT);
	maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT);

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			if (card >= maxcard || queue >= maxqueue)
				continue;
			stat = &devstatus[card * maxqueue + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
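/*
 * Illustrative sketch (not part of the original file): how an in-kernel
 * user of the exported zcrypt_device_status_mask_ext() might count the
 * currently online APQNs. The function name is made up for this example.
 */
static int __maybe_unused example_count_online_apqns(void)
{
	struct zcrypt_device_status_ext *stat;
	int i, n = 0;

	stat = kvcalloc(MAX_ZDEV_ENTRIES_EXT, sizeof(*stat), GFP_KERNEL);
	if (!stat)
		return -ENOMEM;
	zcrypt_device_status_mask_ext(stat, MAX_ZDEV_CARDIDS_EXT,
				      MAX_ZDEV_DOMAINS_EXT);
	for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++)
		if (stat[i].online)
			n++;
	kvfree(stat);
	return n;
}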
int zcrypt_device_status_ext(int card, int queue,
			     struct zcrypt_device_status_ext *devstat)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;

	memset(devstat, 0, sizeof(*devstat));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (card == AP_QID_CARD(zq->queue->qid) &&
			    queue == AP_QID_QUEUE(zq->queue->qid)) {
				devstat->hwtype = zc->card->ap_dev.device_type;
				devstat->functions = zc->card->hwinfo.fac >> 26;
				devstat->qid = zq->queue->qid;
				devstat->online = zq->online ? 0x01 : 0x00;
				spin_unlock(&zcrypt_list_lock);
				return 0;
			}
		}
	}
	spin_unlock(&zcrypt_list_lock);

	return -ENODEV;
}
EXPORT_SYMBOL(zcrypt_device_status_ext);

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;
	u64 cnt;

	memset(reqcnt, 0, sizeof(u32) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}

static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo mex;
	struct ica_rsa_modexpo __user *umex = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex, umex, sizeof(mex)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
		return rc;
	}
	return put_user(mex.outputdatalength, &umex->outputdatalength);
}

static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo_crt crt;
	struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt, ucrt, sizeof(crt)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSACRT rc=%d\n", rc);
		return rc;
	}
	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
}

static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ica_xcRB xcrb;
	struct zcrypt_track tr;
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct ica_xcRB __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
			 rc, xcrb.status);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ep11_urb xcrb;
	struct zcrypt_track tr;
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct ep11_urb __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO:
		return icarsamodexpo_ioctl(perms, arg);
	case ICARSACRT:
		return icarsacrt_ioctl(perms, arg);
	case ZSECSENDCPRB:
		return zsecsendcprb_ioctl(perms, arg);
	case ZSENDEP11CPRB:
		return zsendep11cprb_ioctl(perms, arg);
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT,
					 sizeof(struct zcrypt_device_status_ext),
					 GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status,
					      MAX_ZDEV_CARDIDS_EXT,
					      MAX_ZDEV_DOMAINS_EXT);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kvfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *)arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *)arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *)arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *)arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *)arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		pr_debug("unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcrb {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __packed;

static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct compat_ica_xcrb xcrb32;
	struct zcrypt_track tr;
	struct ica_xcRB xcrb64;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
		return -EFAULT;
	xcrb64.agent_ID = xcrb32.agent_ID;
	xcrb64.user_defined = xcrb32.user_defined;
	xcrb64.request_ID = xcrb32.request_ID;
	xcrb64.request_control_blk_length =
		xcrb32.request_control_blk_length;
	xcrb64.request_control_blk_addr =
		compat_ptr(xcrb32.request_control_blk_addr);
	xcrb64.request_data_length =
		xcrb32.request_data_length;
	xcrb64.request_data_address =
		compat_ptr(xcrb32.request_data_address);
	xcrb64.reply_control_blk_length =
		xcrb32.reply_control_blk_length;
	xcrb64.reply_control_blk_addr =
		compat_ptr(xcrb32.reply_control_blk_addr);
	xcrb64.reply_data_length = xcrb32.reply_data_length;
	xcrb64.reply_data_addr =
		compat_ptr(xcrb32.reply_data_addr);
	xcrb64.priority_window = xcrb32.priority_window;
	xcrb64.status = xcrb32.status;
	do {
		rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
	xcrb32.reply_data_length = xcrb64.reply_data_length;
	xcrb32.status = xcrb64.status;
	if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(perms, filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(perms, filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcrb32(perms, filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};
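/*
 * Illustrative userspace sketch (assumption: compiled against the uapi
 * header <asm/zcrypt.h>; this does not belong into the kernel file
 * itself, which is why it is kept inside a comment): querying the
 * default AP domain index through the misc device node.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <asm/zcrypt.h>
 *
 *	int query_domain_index(void)
 *	{
 *		int dom = -1;
 *		int fd = open("/dev/z90crypt", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, Z90STAT_DOMAIN_INDEX, &dom) != 0)
 *			dom = -1;
 *		close(fd);
 *		return dom;
 *	}
 */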
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees
	 * serialized read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long)zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
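
/*
 * Note (added comment): zcrypt_rng_device_add() and
 * zcrypt_rng_device_remove() above form a reference count under
 * zcrypt_rng_mutex: only the first add allocates the buffer page and
 * registers the hwrng device, and only the matching last remove
 * unregisters it and frees the page. Callers must pair each add with
 * exactly one remove.
 */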
2029 */ 2030 int zcrypt_wait_api_operational(void) 2031 { 2032 static DEFINE_MUTEX(zcrypt_wait_api_lock); 2033 static int zcrypt_wait_api_state; 2034 int rc; 2035 2036 rc = mutex_lock_interruptible(&zcrypt_wait_api_lock); 2037 if (rc) 2038 return rc; 2039 2040 switch (zcrypt_wait_api_state) { 2041 case 0: 2042 /* initial state, invoke wait for the ap bus complete */ 2043 rc = ap_wait_apqn_bindings_complete( 2044 msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS)); 2045 switch (rc) { 2046 case 0: 2047 /* ap bus bindings are complete */ 2048 zcrypt_wait_api_state = 1; 2049 break; 2050 case -EINTR: 2051 /* interrupted, go back to caller */ 2052 break; 2053 case -ETIME: 2054 /* timeout */ 2055 ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n", 2056 __func__); 2057 zcrypt_wait_api_state = -ETIME; 2058 break; 2059 default: 2060 /* other failure */ 2061 pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc); 2062 break; 2063 } 2064 break; 2065 case 1: 2066 /* a previous caller already found ap bus bindings complete */ 2067 rc = 0; 2068 break; 2069 default: 2070 /* a previous caller had timeout or other failure */ 2071 rc = zcrypt_wait_api_state; 2072 break; 2073 } 2074 2075 mutex_unlock(&zcrypt_wait_api_lock); 2076 2077 return rc; 2078 } 2079 EXPORT_SYMBOL(zcrypt_wait_api_operational); 2080 2081 int __init zcrypt_debug_init(void) 2082 { 2083 zcrypt_dbf_info = debug_register("zcrypt", 2, 1, 2084 ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long)); 2085 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); 2086 debug_set_level(zcrypt_dbf_info, DBF_ERR); 2087 2088 return 0; 2089 } 2090 2091 void zcrypt_debug_exit(void) 2092 { 2093 debug_unregister(zcrypt_dbf_info); 2094 } 2095 2096 static int __init zcdn_init(void) 2097 { 2098 int rc; 2099 2100 /* create a new class 'zcrypt' */ 2101 rc = class_register(&zcrypt_class); 2102 if (rc) 2103 goto out_class_register_failed; 2104 2105 /* alloc device minor range */ 2106 rc = alloc_chrdev_region(&zcrypt_devt, 2107 0, ZCRYPT_MAX_MINOR_NODES, 2108 ZCRYPT_NAME); 2109 if (rc) 2110 goto out_alloc_chrdev_failed; 2111 2112 cdev_init(&zcrypt_cdev, &zcrypt_fops); 2113 zcrypt_cdev.owner = THIS_MODULE; 2114 rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2115 if (rc) 2116 goto out_cdev_add_failed; 2117 2118 /* need some class specific sysfs attributes */ 2119 rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create); 2120 if (rc) 2121 goto out_class_create_file_1_failed; 2122 rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy); 2123 if (rc) 2124 goto out_class_create_file_2_failed; 2125 2126 return 0; 2127 2128 out_class_create_file_2_failed: 2129 class_remove_file(&zcrypt_class, &class_attr_zcdn_create); 2130 out_class_create_file_1_failed: 2131 cdev_del(&zcrypt_cdev); 2132 out_cdev_add_failed: 2133 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2134 out_alloc_chrdev_failed: 2135 class_unregister(&zcrypt_class); 2136 out_class_register_failed: 2137 return rc; 2138 } 2139 2140 static void zcdn_exit(void) 2141 { 2142 class_remove_file(&zcrypt_class, &class_attr_zcdn_create); 2143 class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy); 2144 zcdn_destroy_all(); 2145 cdev_del(&zcrypt_cdev); 2146 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2147 class_unregister(&zcrypt_class); 2148 } 2149 2150 /* 2151 * zcrypt_api_init(): Module initialization. 2152 * 2153 * The module initialization code. 
2154 */ 2155 int __init zcrypt_api_init(void) 2156 { 2157 int rc; 2158 2159 /* make sure the mempool threshold is >= 1 */ 2160 if (zcrypt_mempool_threshold < 1) { 2161 rc = -EINVAL; 2162 goto out; 2163 } 2164 2165 rc = zcrypt_debug_init(); 2166 if (rc) 2167 goto out; 2168 2169 rc = zcdn_init(); 2170 if (rc) 2171 goto out_zcdn_init_failed; 2172 2173 rc = zcrypt_ccamisc_init(); 2174 if (rc) 2175 goto out_ccamisc_init_failed; 2176 2177 rc = zcrypt_ep11misc_init(); 2178 if (rc) 2179 goto out_ep11misc_init_failed; 2180 2181 /* Register the request sprayer. */ 2182 rc = misc_register(&zcrypt_misc_device); 2183 if (rc < 0) 2184 goto out_misc_register_failed; 2185 2186 zcrypt_msgtype6_init(); 2187 zcrypt_msgtype50_init(); 2188 2189 return 0; 2190 2191 out_misc_register_failed: 2192 zcrypt_ep11misc_exit(); 2193 out_ep11misc_init_failed: 2194 zcrypt_ccamisc_exit(); 2195 out_ccamisc_init_failed: 2196 zcdn_exit(); 2197 out_zcdn_init_failed: 2198 zcrypt_debug_exit(); 2199 out: 2200 return rc; 2201 } 2202 2203 /* 2204 * zcrypt_api_exit(): Module termination. 2205 * 2206 * The module termination code. 2207 */ 2208 void __exit zcrypt_api_exit(void) 2209 { 2210 zcdn_exit(); 2211 misc_deregister(&zcrypt_misc_device); 2212 zcrypt_msgtype6_exit(); 2213 zcrypt_msgtype50_exit(); 2214 zcrypt_ccamisc_exit(); 2215 zcrypt_ep11misc_exit(); 2216 zcrypt_debug_exit(); 2217 } 2218 2219 module_init(zcrypt_api_init); 2220 module_exit(zcrypt_api_exit); 2221