// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
 *            Halil Pasic <pasic@linux.ibm.com>
 *            Pierre Morel <pmorel@linux.ibm.com>
 */
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

#define AP_QUEUE_ASSIGNED "assigned"
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"

#define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */

static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);

/**
 * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
 *                           KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *                             guest's APCB.
 * 2. kvm->lock:               required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @kvm is NULL, the KVM lock will not be taken.
 */
static inline void get_update_locks_for_kvm(struct kvm *kvm)
{
        mutex_lock(&matrix_dev->guests_lock);
        if (kvm)
                mutex_lock(&kvm->lock);
        mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_kvm: Release the locks used to dynamically update a
 *                               KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @kvm is NULL, the KVM lock will not be released.
 */
static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
        mutex_unlock(&matrix_dev->mdevs_lock);
        if (kvm)
                mutex_unlock(&kvm->lock);
        mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
 *                            KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *               configuration data to use to update a KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *                             guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *       lock will not be taken.
 */
static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
        mutex_lock(&matrix_dev->guests_lock);
        if (matrix_mdev && matrix_mdev->kvm)
                mutex_lock(&matrix_mdev->kvm->lock);
        mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_mdev: Release the locks used to dynamically update a
 *                                KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *               configuration data to use to update a KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. matrix_mdev->kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *       lock will not be released.
 */
static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
        mutex_unlock(&matrix_dev->mdevs_lock);
        if (matrix_mdev && matrix_mdev->kvm)
                mutex_unlock(&matrix_mdev->kvm->lock);
        mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
 *                           acquire the locks required to update the APCB of
 *                           the KVM guest to which the mdev is attached.
 *
 * @apqn: the APQN of a queue device.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *                             guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
 *       will not be taken.
 *
 * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
 *         is not assigned to an ap_matrix_mdev.
 */
static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
        struct ap_matrix_mdev *matrix_mdev;

        mutex_lock(&matrix_dev->guests_lock);

        list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
                if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
                    test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
                        if (matrix_mdev->kvm)
                                mutex_lock(&matrix_mdev->kvm->lock);

                        mutex_lock(&matrix_dev->mdevs_lock);

                        return matrix_mdev;
                }
        }

        mutex_lock(&matrix_dev->mdevs_lock);

        return NULL;
}

/**
 * get_update_locks_for_queue: get the locks required to update the APCB of the
 *                             KVM guest to which the matrix mdev linked to a
 *                             vfio_ap_queue object is attached.
 *
 * @q: a pointer to a vfio_ap_queue object.
 *
 * The proper locking order is:
 * 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a
 *                                KVM guest's APCB.
 * 2. q->matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:     required to access data stored in matrix_mdev
 *
 * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock will
 *       not be taken.
 */
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
        mutex_lock(&matrix_dev->guests_lock);
        if (q->matrix_mdev && q->matrix_mdev->kvm)
                mutex_lock(&q->matrix_mdev->kvm->lock);
        mutex_lock(&matrix_dev->mdevs_lock);
}
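
/*
 * Illustrative locking pattern (a summary of how the helpers above are used
 * elsewhere in this file, not a new interface): a sysfs handler that changes
 * the mdev's AP configuration brackets the update like this:
 *
 *	get_update_locks_for_mdev(matrix_mdev);
 *	... modify matrix_mdev->matrix and matrix_mdev->shadow_apcb ...
 *	vfio_ap_mdev_update_guest_apcb(matrix_mdev);
 *	release_update_locks_for_mdev(matrix_mdev);
 *
 * See assign_adapter_store() and ap_config_store() below for real callers.
 */
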
/**
 * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
 *                          hash table of queues assigned to a matrix mdev
 * @matrix_mdev: the matrix mdev
 * @apqn: The APQN of a queue device
 *
 * Return: the pointer to the vfio_ap_queue struct representing the queue or
 *         NULL if the queue is not assigned to @matrix_mdev
 */
static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
                                        struct ap_matrix_mdev *matrix_mdev,
                                        int apqn)
{
        struct vfio_ap_queue *q;

        hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
                               apqn) {
                if (q && q->apqn == apqn)
                        return q;
        }

        return NULL;
}

/**
 * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns if the ap_tapq function succeeded and the bit is clear.
 * Returns if the ap_tapq function failed with an invalid, deconfigured or
 * checkstopped AP.
 * Otherwise retries up to 5 times after waiting 20ms.
 */
static void vfio_ap_wait_for_irqclear(int apqn)
{
        struct ap_queue_status status;
        int retry = 5;

        do {
                status = ap_tapq(apqn, NULL);
                switch (status.response_code) {
                case AP_RESPONSE_NORMAL:
                case AP_RESPONSE_RESET_IN_PROGRESS:
                        if (!status.irq_enabled)
                                return;
                        fallthrough;
                case AP_RESPONSE_BUSY:
                        msleep(20);
                        break;
                case AP_RESPONSE_Q_NOT_AVAIL:
                case AP_RESPONSE_DECONFIGURED:
                case AP_RESPONSE_CHECKSTOPPED:
                default:
                        WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
                                  status.response_code, apqn);
                        return;
                }
        } while (--retry);

        WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
                  __func__, status.response_code, apqn);
}

/**
 * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
 * Unpins the guest's page holding the NIB when it exists.
 * Resets the saved_iova and saved_isc to invalid values.
 */
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
        if (!q)
                return;
        if (q->saved_isc != VFIO_AP_ISC_INVALID &&
            !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
                kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
                q->saved_isc = VFIO_AP_ISC_INVALID;
        }
        if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
                vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
                q->saved_iova = 0;
        }
}

/**
 * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption and, in case of success, reset
 * in progress or IRQ disable command already processed, calls
 * vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear
 * and calls vfio_ap_free_aqic_resources() to free the resources associated
 * with the AP interrupt handling.
 *
 * In the case the AP is busy, or a reset is in progress,
 * retries after 20ms, up to 5 times.
 *
 * Returns if the ap_aqic function failed with an invalid, deconfigured or
 * checkstopped AP.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
        union ap_qirq_ctrl aqic_gisa = { .value = 0 };
        struct ap_queue_status status;
        int retries = 5;

        do {
                status = ap_aqic(q->apqn, aqic_gisa, 0);
                switch (status.response_code) {
                case AP_RESPONSE_OTHERWISE_CHANGED:
                case AP_RESPONSE_NORMAL:
                        vfio_ap_wait_for_irqclear(q->apqn);
                        goto end_free;
                case AP_RESPONSE_RESET_IN_PROGRESS:
                case AP_RESPONSE_BUSY:
                        msleep(20);
                        break;
                case AP_RESPONSE_Q_NOT_AVAIL:
                case AP_RESPONSE_DECONFIGURED:
                case AP_RESPONSE_CHECKSTOPPED:
                case AP_RESPONSE_INVALID_ADDRESS:
                default:
                        /* All cases in default means AP not operational */
                        WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
                                  status.response_code);
                        goto end_free;
                }
        } while (retries--);

        WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
                  status.response_code);
end_free:
        vfio_ap_free_aqic_resources(q);
        return status;
}

/**
 * vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
 *
 * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
 * @nib: the location for storing the nib address.
 *
 * When the PQAP(AQIC) instruction is executed, general register 2 contains the
 * address of the notification indicator byte (nib) used for IRQ notification.
 * This function parses and validates the nib from gr2.
 *
 * Return: returns zero if the nib address is valid; otherwise, returns
 *         -EINVAL.
 */
static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
        *nib = vcpu->run->s.regs.gprs[2];

        if (!*nib)
                return -EINVAL;
        if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
                return -EINVAL;

        return 0;
}

/**
 * ensure_nib_shared() - Ensure the address of the NIB is secure and shared
 * @addr: the physical (absolute) address of the NIB
 *
 * This function checks whether the NIB page, which has been pinned with
 * vfio_pin_pages(), is a shared page belonging to a secure guest.
 *
 * It will call uv_pin_shared() on it; if the page was already pinned shared
 * (i.e. if the NIB belongs to a secure guest and is shared), then 0
 * (success) is returned. If the NIB was not shared, vfio_pin_pages() had
 * exported it and now it does not belong to the secure guest anymore. In
 * that case, an error is returned.
 *
 * Context: the NIB (at physical address @addr) has to be pinned with
 *          vfio_pin_pages() before calling this function.
 *
 * Return: 0 in case of success, otherwise an error < 0.
 */
static int ensure_nib_shared(unsigned long addr)
{
        /*
         * The nib has to be located in shared storage since guest and
         * host access it. vfio_pin_pages() will do a pin shared and
         * if that fails (possibly because it's not a shared page) it
         * calls export. We try to do a second pin shared here so that
         * the UV gives us an error code if we try to pin a non-shared
         * page.
         *
         * If the page is already pinned shared the UV will return a success.
         */
        return uv_pin_shared(addr);
}

/**
 * vfio_ap_irq_enable - Enable Interruption for an APQN
 *
 * @q: the vfio_ap_queue holding AQIC parameters
 * @isc: the guest ISC to register with the GIB interface
 * @vcpu: the vcpu object containing the registers specifying the parameters
 *        passed to the PQAP(AQIC) instruction.
 *
 * Pin the NIB saved in *q
 * Register the guest ISC to GIB interface and retrieve the
 * host ISC to issue the host side PQAP/AQIC
 *
 * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case the
 * vfio_pin_pages or kvm_s390_gisc_register failed.
 *
 * Otherwise return the ap_queue_status returned by the ap_aqic();
 * all retry handling will be done by the guest.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
                                                 int isc,
                                                 struct kvm_vcpu *vcpu)
{
        union ap_qirq_ctrl aqic_gisa = { .value = 0 };
        struct ap_queue_status status = {};
        struct kvm_s390_gisa *gisa;
        struct page *h_page;
        int nisc;
        struct kvm *kvm;
        phys_addr_t h_nib;
        dma_addr_t nib;
        int ret;

        /* Verify that the notification indicator byte address is valid */
        if (vfio_ap_validate_nib(vcpu, &nib)) {
                VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
                                 __func__, &nib, q->apqn);

                status.response_code = AP_RESPONSE_INVALID_ADDRESS;
                return status;
        }

        /* The pin will probably be successful even if the NIB was not shared */
        ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
                             IOMMU_READ | IOMMU_WRITE, &h_page);
        switch (ret) {
        case 1:
                break;
        default:
                VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d, "
                                 "nib=%pad, apqn=%#04x\n",
                                 __func__, ret, &nib, q->apqn);

                status.response_code = AP_RESPONSE_INVALID_ADDRESS;
                return status;
        }

        kvm = q->matrix_mdev->kvm;
        gisa = kvm->arch.gisa_int.origin;

        h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
        aqic_gisa.gisc = isc;

        /* NIB in non-shared storage is a rc 6 for PV guests */
        if (kvm_s390_pv_cpu_is_protected(vcpu) &&
            ensure_nib_shared(h_nib & PAGE_MASK)) {
                vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
                status.response_code = AP_RESPONSE_INVALID_ADDRESS;
                return status;
        }

        nisc = kvm_s390_gisc_register(kvm, isc);
        if (nisc < 0) {
                VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
                                 __func__, nisc, isc, q->apqn);

                vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
                status.response_code = AP_RESPONSE_INVALID_ADDRESS;
                return status;
        }

        aqic_gisa.isc = nisc;
        aqic_gisa.ir = 1;
        aqic_gisa.gisa = virt_to_phys(gisa) >> 4;

        status = ap_aqic(q->apqn, aqic_gisa, h_nib);
        switch (status.response_code) {
        case AP_RESPONSE_NORMAL:
                /* See if we did clear older IRQ configuration */
                vfio_ap_free_aqic_resources(q);
                q->saved_iova = nib;
                q->saved_isc = isc;
                break;
        case AP_RESPONSE_OTHERWISE_CHANGED:
                /* We could not modify IRQ settings: clear new configuration */
                ret = kvm_s390_gisc_unregister(kvm, isc);
                if (ret)
                        VFIO_AP_DBF_WARN("%s: kvm_s390_gisc_unregister: rc=%d isc=%d, apqn=%#04x\n",
                                         __func__, ret, isc, q->apqn);
                vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
                break;
        default:
                pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
                        status.response_code);
                vfio_ap_irq_disable(q);
                break;
        }

        if (status.response_code != AP_RESPONSE_NORMAL) {
                VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
                                 "zone=%#x, ir=%#x, gisc=%#x, f=%#x, "
                                 "gisa=%#x, isc=%#x, apqn=%#04x\n",
                                 __func__, status.response_code,
                                 aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
                                 aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
                                 q->apqn);
        }

        return status;
}

/**
 * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array
 *                              of big endian elements that can be passed by
 *                              value to an s390dbf sprintf event function to
 *                              format a UUID string.
 *
 * @guid: the object containing the little endian guid
 * @uuid: a six-element array of long values that can be passed by value as
 *        arguments for a formatting string specifying a UUID.
 *
 * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf
 * event functions only if the memory for the passed string is available as
 * long as the debug feature exists. Since a mediated device can be removed
 * at any time, its name cannot be used because %s passes the reference to
 * the string in memory and the reference will go stale once the device is
 * removed.
 *
 * The s390dbf string formatting function allows a maximum of 9 arguments for a
 * message to be displayed in the 'sprintf' view. In order to use the bytes
 * comprising the mediated device's UUID to display the mediated device name,
 * they will have to be converted into an array whose elements can be passed by
 * value to sprintf. For example:
 *
 * guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 }
 * mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804
 * array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 }
 * formatting string: "%08lx-%04lx-%04lx-%04lx-%04lx%08lx"
 */
static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
{
        /*
         * The input guid is ordered in little endian, so it needs to be
         * reordered for displaying a UUID as a string. This specifies the
         * guid indices in proper order.
         */
        uuid[0] = le32_to_cpup((__le32 *)guid);
        uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
        uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
        uuid[3] = *((__u16 *)&guid->b[8]);
        uuid[4] = *((__u16 *)&guid->b[10]);
        uuid[5] = *((__u32 *)&guid->b[12]);
}
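
/*
 * Register layout sketch for PQAP(AQIC) (s390 bit numbering, in which bit 0
 * is the leftmost/most significant bit; stated here for clarity, see
 * handle_pqap() below): GR0 holds the APQN in bits 48-63, GR1 holds the IR
 * (interruption requested) flag in bit 16 and the guest ISC in bits 61-63,
 * and GR2 holds the NIB address. That is why handle_pqap() extracts the IR
 * flag with (status >> (63 - 16)) & 0x01 and the ISC with status & 0x07.
 */
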
/**
 * handle_pqap - PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * Response.status may be set to one of the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0): in case of success
 * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible
 * response codes.
 * We take the matrix_dev->mdevs_lock to ensure serialization on queues and
 * mediated device access.
 *
 * Return: 0 if we could handle the request inside KVM.
 * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
        uint64_t status;
        uint16_t apqn;
        unsigned long uuid[6];
        struct vfio_ap_queue *q;
        struct ap_queue_status qstatus = {
                               .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
        struct ap_matrix_mdev *matrix_mdev;

        apqn = vcpu->run->s.regs.gprs[0] & 0xffff;

        /* If we do not use the AIV facility just go to userland */
        if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
                VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
                                 __func__, apqn, vcpu->arch.sie_block->eca);

                return -EOPNOTSUPP;
        }

        mutex_lock(&matrix_dev->mdevs_lock);

        if (!vcpu->kvm->arch.crypto.pqap_hook) {
                VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
                                 __func__, apqn);

                goto out_unlock;
        }

        matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
                                   struct ap_matrix_mdev, pqap_hook);

        /* If there is no guest using the mdev, there is nothing to do */
        if (!matrix_mdev->kvm) {
                vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
                VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
                                 __func__, uuid[0], uuid[1], uuid[2],
                                 uuid[3], uuid[4], uuid[5], apqn);
                goto out_unlock;
        }

        q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
        if (!q) {
                VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
                                 __func__, AP_QID_CARD(apqn),
                                 AP_QID_QUEUE(apqn));
                goto out_unlock;
        }

        status = vcpu->run->s.regs.gprs[1];

        /* If IR bit(16) is set we enable the interrupt */
        if ((status >> (63 - 16)) & 0x01)
                qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
        else
                qstatus = vfio_ap_irq_disable(q);

out_unlock:
        memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
        vcpu->run->s.regs.gprs[1] >>= 32;
        mutex_unlock(&matrix_dev->mdevs_lock);
        return 0;
}

static void vfio_ap_matrix_init(struct ap_config_info *info,
                                struct ap_matrix *matrix)
{
        matrix->apm_max = info->apxa ? info->na : 63;
        matrix->aqm_max = info->apxa ? info->nd : 15;
        matrix->adm_max = info->apxa ? info->nd : 15;
}
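
/*
 * Note (background, summarizing the QCI semantics assumed above): when the
 * AP extended addressing (APXA) facility is installed, the maximum adapter
 * index (na) and maximum domain index (nd) come from the QCI info block;
 * without APXA the architecture limits the matrix to 64 adapters
 * (APIDs 0-63) and 16 domains (APQIs 0-15).
 */
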
static void signal_guest_ap_cfg_changed(struct ap_matrix_mdev *matrix_mdev)
{
        if (matrix_mdev->cfg_chg_trigger)
                eventfd_signal(matrix_mdev->cfg_chg_trigger);
}

static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
        if (matrix_mdev->kvm) {
                kvm_arch_crypto_set_masks(matrix_mdev->kvm,
                                          matrix_mdev->shadow_apcb.apm,
                                          matrix_mdev->shadow_apcb.aqm,
                                          matrix_mdev->shadow_apcb.adm);

                signal_guest_ap_cfg_changed(matrix_mdev);
        }
}

static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
{
        DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);

        bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
        bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
                   (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);

        return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
                             AP_DOMAINS);
}

static bool _queue_passable(struct vfio_ap_queue *q)
{
        if (!q)
                return false;

        switch (q->reset_status.response_code) {
        case AP_RESPONSE_NORMAL:
        case AP_RESPONSE_DECONFIGURED:
        case AP_RESPONSE_CHECKSTOPPED:
                return true;
        default:
                return false;
        }
}
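
/*
 * Rationale (as reflected by _queue_passable() above and apq_status_check()
 * below): a queue may be passed through only when its last reset either
 * completed normally or ended because the queue is deconfigured or
 * checkstopped; in all other states the queue may still hold residual state
 * from a prior user, so it is kept out of the guest's APCB.
 */
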
/*
 * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
 *                              to ensure no queue devices are passed through to
 *                              the guest that are not bound to the vfio_ap
 *                              device driver.
 *
 * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
 * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
 *                guest's AP configuration that are still in the host's AP
 *                configuration.
 *
 * Note: If an APQN references a queue device that is not bound to the vfio_ap
 *       driver, its APID will be filtered from the guest's APCB. The matrix
 *       structure precludes filtering an individual APQN, so its APID will be
 *       filtered. Consequently, all queues associated with the adapter that
 *       are in the host's AP configuration must be reset. If queues are
 *       subsequently made available again to the guest, they should re-appear
 *       in a reset state.
 *
 * Return: a boolean value indicating whether the KVM guest's APCB was changed
 *         by the filtering or not.
 */
static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
                                       unsigned long *apm_filtered)
{
        unsigned long apid, apqi, apqn;
        DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
        DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);

        bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
        bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
        vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
        bitmap_clear(apm_filtered, 0, AP_DEVICES);

        /*
         * Copy the adapters, domains and control domains to the shadow_apcb
         * from the matrix mdev, but only those that are assigned to the host's
         * AP configuration.
         */
        bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
                   (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
        bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
                   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);

        for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
                for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
                                     AP_DOMAINS) {
                        /*
                         * If the APQN is not bound to the vfio_ap device
                         * driver, then we can't assign it to the guest's
                         * AP configuration. The AP architecture won't
                         * allow filtering of a single APQN, so let's filter
                         * the APID since an adapter represents a physical
                         * hardware device.
                         */
                        apqn = AP_MKQID(apid, apqi);
                        if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) {
                                clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);

                                /*
                                 * If the adapter was previously plugged into
                                 * the guest, let's let the caller know that
                                 * the APID was filtered.
                                 */
                                if (test_bit_inv(apid, prev_shadow_apm))
                                        set_bit_inv(apid, apm_filtered);

                                break;
                        }
                }
        }

        return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
                             AP_DEVICES) ||
               !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
                             AP_DOMAINS);
}

static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
        struct ap_matrix_mdev *matrix_mdev =
                container_of(vdev, struct ap_matrix_mdev, vdev);

        matrix_mdev->mdev = to_mdev_device(vdev->dev);
        vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
        matrix_mdev->pqap_hook = handle_pqap;
        vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
        hash_init(matrix_mdev->qtable.queues);

        return 0;
}

static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
        struct ap_matrix_mdev *matrix_mdev;
        int ret;

        matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
                                        &vfio_ap_matrix_dev_ops);
        if (IS_ERR(matrix_mdev))
                return PTR_ERR(matrix_mdev);

        ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
        if (ret)
                goto err_put_vdev;
        matrix_mdev->req_trigger = NULL;
        matrix_mdev->cfg_chg_trigger = NULL;
        dev_set_drvdata(&mdev->dev, matrix_mdev);
        mutex_lock(&matrix_dev->mdevs_lock);
        list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
        mutex_unlock(&matrix_dev->mdevs_lock);
        return 0;

err_put_vdev:
        vfio_put_device(&matrix_mdev->vdev);
        return ret;
}

static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
                                    struct vfio_ap_queue *q)
{
        if (!q || vfio_ap_mdev_get_queue(matrix_mdev, q->apqn))
                return;

        q->matrix_mdev = matrix_mdev;
        hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
}

static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
        struct vfio_ap_queue *q;

        q = vfio_ap_find_queue(apqn);
        vfio_ap_mdev_link_queue(matrix_mdev, q);
}

static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
{
        hash_del(&q->mdev_qnode);
}

static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
{
        q->matrix_mdev = NULL;
}

static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
{
        struct vfio_ap_queue *q;
        unsigned long apid, apqi;

        for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
                for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
                                     AP_DOMAINS) {
                        q = vfio_ap_mdev_get_queue(matrix_mdev,
                                                   AP_MKQID(apid, apqi));
                        if (q)
                                q->matrix_mdev = NULL;
                }
        }
}

static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

        vfio_unregister_group_dev(&matrix_mdev->vdev);

        mutex_lock(&matrix_dev->guests_lock);
        mutex_lock(&matrix_dev->mdevs_lock);
        vfio_ap_mdev_reset_queues(matrix_mdev);
        vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
        list_del(&matrix_mdev->node);
        mutex_unlock(&matrix_dev->mdevs_lock);
        mutex_unlock(&matrix_dev->guests_lock);
        vfio_put_device(&matrix_mdev->vdev);
}

#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s"

#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev"

static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee,
                                         struct ap_matrix_mdev *assigned_to,
                                         unsigned long *apm, unsigned long *aqm)
{
        unsigned long apid, apqi;

        for_each_set_bit_inv(apid, apm, AP_DEVICES) {
                for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
                        dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR,
                                 apid, apqi, dev_name(mdev_dev(assigned_to->mdev)));
                }
        }
}

static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee,
                                        unsigned long *apm, unsigned long *aqm)
{
        unsigned long apid, apqi;

        for_each_set_bit_inv(apid, apm, AP_DEVICES) {
                for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
                        dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi);
        }
}
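
/*
 * Worked example (values invented for illustration): if one mdev is assigned
 * APM { 4 } and AQM { 0x47, 0x48 }, then assigning APM { 4 } with
 * AQM { 0x48 } to a second mdev would share APQN 04.0048, so the assignment
 * is rejected with -EADDRINUSE by vfio_ap_mdev_verify_no_sharing() below.
 */
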
/**
 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
 *
 * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being
 *            assigned; or, NULL if this function was called by the AP bus
 *            driver in_use callback to verify none of the APQNs being reserved
 *            for the host device driver are in use by a vfio_ap mediated device
 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
 *
 * Verifies that each APQN derived from the Cartesian product of APIDs
 * represented by the bits set in @mdev_apm and the APQIs of the bits set in
 * @mdev_aqm is not assigned to a mediated device other than the mdev to which
 * the APQN is being assigned (@assignee). AP queue sharing is not allowed.
 *
 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee,
                                          unsigned long *mdev_apm,
                                          unsigned long *mdev_aqm)
{
        struct ap_matrix_mdev *assigned_to;
        DECLARE_BITMAP(apm, AP_DEVICES);
        DECLARE_BITMAP(aqm, AP_DOMAINS);

        list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) {
                /*
                 * If the mdev to which the mdev_apm and mdev_aqm is being
                 * assigned is the same as the mdev being verified
                 */
                if (assignee == assigned_to)
                        continue;

                memset(apm, 0, sizeof(apm));
                memset(aqm, 0, sizeof(aqm));

                /*
                 * We work on full longs, as we can only exclude the leftover
                 * bits in non-inverse order. The leftover is all zeros.
                 */
                if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES))
                        continue;

                if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS))
                        continue;

                if (assignee)
                        vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm);
                else
                        vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm);

                return -EADDRINUSE;
        }

        return 0;
}

/**
 * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
 *                               not reserved for the default zcrypt driver and
 *                               are not assigned to another mdev.
 *
 * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
 *
 * Return: One of the following values:
 * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
 *   most likely -EBUSY indicating the ap_perms_mutex lock is already held.
 * o -EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
 *   zcrypt default driver.
 * o -EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev
 * o A zero indicating validation succeeded.
 */
static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
{
        if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
                                               matrix_mdev->matrix.aqm))
                return -EADDRNOTAVAIL;

        return vfio_ap_mdev_verify_no_sharing(matrix_mdev,
                                              matrix_mdev->matrix.apm,
                                              matrix_mdev->matrix.aqm);
}

static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
                                      unsigned long apid)
{
        unsigned long apqi;

        for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
                vfio_ap_mdev_link_apqn(matrix_mdev,
                                       AP_MKQID(apid, apqi));
}

static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
                                    unsigned long apid,
                                    struct list_head *qlist)
{
        struct vfio_ap_queue *q;
        unsigned long apqi;

        for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
                q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
                if (q)
                        list_add_tail(&q->reset_qnode, qlist);
        }
}

static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
                                  unsigned long apid)
{
        struct list_head qlist;

        INIT_LIST_HEAD(&qlist);
        collect_queues_to_reset(matrix_mdev, apid, &qlist);
        vfio_ap_mdev_reset_qlist(&qlist);
}

static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
                                  unsigned long *apm_reset)
{
        struct list_head qlist;
        unsigned long apid;

        if (bitmap_empty(apm_reset, AP_DEVICES))
                return 0;

        INIT_LIST_HEAD(&qlist);

        for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
                collect_queues_to_reset(matrix_mdev, apid, &qlist);

        return vfio_ap_mdev_reset_qlist(&qlist);
}
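
/*
 * Illustrative sysfs usage (per Documentation/arch/s390/vfio-ap.rst; the
 * mdev UUID is an assumed placeholder):
 *
 *	echo 0x04 > /sys/devices/vfio_ap/matrix/$UUID/assign_adapter
 *	echo 0x47 > /sys/devices/vfio_ap/matrix/$UUID/assign_domain
 *
 * together assign APQN 04.0047 to the mdev, subject to the validation done
 * by the store functions below.
 */
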
/**
 * assign_adapter_store - parses the APID from @buf and sets the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's assign_adapter attribute
 * @buf:   a buffer containing the AP adapter number (APID) to
 *         be assigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APID is not a valid number
 *
 *	2. -ENODEV
 *	   The APID exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APQIs have yet been assigned, the APID is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   A lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_adapter_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        int ret;
        unsigned long apid;
        DECLARE_BITMAP(apm_filtered, AP_DEVICES);
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

        mutex_lock(&ap_perms_mutex);
        get_update_locks_for_mdev(matrix_mdev);

        ret = kstrtoul(buf, 0, &apid);
        if (ret)
                goto done;

        if (apid > matrix_mdev->matrix.apm_max) {
                ret = -ENODEV;
                goto done;
        }

        if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
                ret = count;
                goto done;
        }

        set_bit_inv(apid, matrix_mdev->matrix.apm);

        ret = vfio_ap_mdev_validate_masks(matrix_mdev);
        if (ret) {
                clear_bit_inv(apid, matrix_mdev->matrix.apm);
                goto done;
        }

        vfio_ap_mdev_link_adapter(matrix_mdev, apid);

        if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);
                reset_queues_for_apids(matrix_mdev, apm_filtered);
        }

        ret = count;
done:
        release_update_locks_for_mdev(matrix_mdev);
        mutex_unlock(&ap_perms_mutex);

        return ret;
}
static DEVICE_ATTR_WO(assign_adapter);

static struct vfio_ap_queue
*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
                             unsigned long apid, unsigned long apqi)
{
        struct vfio_ap_queue *q = NULL;

        q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
        /* If the queue is assigned to the matrix mdev, unlink it. */
        if (q)
                vfio_ap_unlink_queue_fr_mdev(q);

        return q;
}

/**
 * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned
 *                               adapter from the matrix mdev to which the
 *                               adapter was assigned.
 * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
 * @apid: the APID of the unassigned adapter.
 * @qlist: list for storing queues associated with unassigned adapter that
 *         need to be reset.
 */
static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
                                        unsigned long apid,
                                        struct list_head *qlist)
{
        unsigned long apqi;
        struct vfio_ap_queue *q;

        for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
                q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

                if (q && qlist) {
                        if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
                            test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
                                list_add_tail(&q->reset_qnode, qlist);
                }
        }
}

static void vfio_ap_mdev_hot_unplug_adapters(struct ap_matrix_mdev *matrix_mdev,
                                             unsigned long *apids)
{
        struct vfio_ap_queue *q, *tmpq;
        struct list_head qlist;
        unsigned long apid;
        bool apcb_update = false;

        INIT_LIST_HEAD(&qlist);

        for_each_set_bit_inv(apid, apids, AP_DEVICES) {
                vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);

                if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
                        clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
                        apcb_update = true;
                }
        }

        /* Only update apcb if needed to avoid impacting guest */
        if (apcb_update)
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);

        vfio_ap_mdev_reset_qlist(&qlist);

        list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
                vfio_ap_unlink_mdev_fr_queue(q);
                list_del(&q->reset_qnode);
        }
}

static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
                                            unsigned long apid)
{
        DECLARE_BITMAP(apids, AP_DEVICES);

        bitmap_zero(apids, AP_DEVICES);
        set_bit_inv(apid, apids);
        vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, apids);
}

/**
 * unassign_adapter_store - parses the APID from @buf and clears the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's unassign_adapter attribute
 * @buf:   a buffer containing the adapter number (APID) to be unassigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APID is not a number
 *	-ENODEV if the APID exceeds the maximum value configured for the
 *		system
 */
static ssize_t unassign_adapter_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        int ret;
        unsigned long apid;
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

        get_update_locks_for_mdev(matrix_mdev);

        ret = kstrtoul(buf, 0, &apid);
        if (ret)
                goto done;

        if (apid > matrix_mdev->matrix.apm_max) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
                ret = count;
                goto done;
        }

        clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
        vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
        ret = count;
done:
        release_update_locks_for_mdev(matrix_mdev);
        return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);

static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
                                     unsigned long apqi)
{
        unsigned long apid;

        for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
                vfio_ap_mdev_link_apqn(matrix_mdev,
                                       AP_MKQID(apid, apqi));
}
/**
 * assign_domain_store - parses the APQI from @buf and sets the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's assign_domain attribute
 * @buf:   a buffer containing the AP queue index (APQI) of the domain to
 *         be assigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise
 * returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APQI is not a valid number
 *
 *	2. -ENODEV
 *	   The APQI exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APIDs have yet been assigned, the APQI is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   The lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_domain_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        int ret;
        unsigned long apqi;
        DECLARE_BITMAP(apm_filtered, AP_DEVICES);
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

        mutex_lock(&ap_perms_mutex);
        get_update_locks_for_mdev(matrix_mdev);

        ret = kstrtoul(buf, 0, &apqi);
        if (ret)
                goto done;

        if (apqi > matrix_mdev->matrix.aqm_max) {
                ret = -ENODEV;
                goto done;
        }

        if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
                ret = count;
                goto done;
        }

        set_bit_inv(apqi, matrix_mdev->matrix.aqm);

        ret = vfio_ap_mdev_validate_masks(matrix_mdev);
        if (ret) {
                clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
                goto done;
        }

        vfio_ap_mdev_link_domain(matrix_mdev, apqi);

        if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);
                reset_queues_for_apids(matrix_mdev, apm_filtered);
        }

        ret = count;
done:
        release_update_locks_for_mdev(matrix_mdev);
        mutex_unlock(&ap_perms_mutex);

        return ret;
}
static DEVICE_ATTR_WO(assign_domain);

static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
                                       unsigned long apqi,
                                       struct list_head *qlist)
{
        unsigned long apid;
        struct vfio_ap_queue *q;

        for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
                q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

                if (q && qlist) {
                        if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
                            test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
                                list_add_tail(&q->reset_qnode, qlist);
                }
        }
}

static void vfio_ap_mdev_hot_unplug_domains(struct ap_matrix_mdev *matrix_mdev,
                                            unsigned long *apqis)
{
        struct vfio_ap_queue *q, *tmpq;
        struct list_head qlist;
        unsigned long apqi;
        bool apcb_update = false;

        INIT_LIST_HEAD(&qlist);

        for_each_set_bit_inv(apqi, apqis, AP_DOMAINS) {
                vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);

                if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
                        clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
                        apcb_update = true;
                }
        }

        /* Only update apcb if needed to avoid impacting guest */
        if (apcb_update)
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);

        vfio_ap_mdev_reset_qlist(&qlist);

        list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
                vfio_ap_unlink_mdev_fr_queue(q);
                list_del(&q->reset_qnode);
        }
}

static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
                                           unsigned long apqi)
{
        DECLARE_BITMAP(apqis, AP_DOMAINS);

        bitmap_zero(apqis, AP_DOMAINS);
        set_bit_inv(apqi, apqis);
        vfio_ap_mdev_hot_unplug_domains(matrix_mdev, apqis);
}

/**
 * unassign_domain_store - parses the APQI from @buf and clears the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's unassign_domain attribute
 * @buf:   a buffer containing the AP queue index (APQI) of the domain to
 *         be unassigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APQI is not a number
 *	-ENODEV if the APQI exceeds the maximum value configured for the system
 */
static ssize_t unassign_domain_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        int ret;
        unsigned long apqi;
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

        get_update_locks_for_mdev(matrix_mdev);

        ret = kstrtoul(buf, 0, &apqi);
        if (ret)
                goto done;

        if (apqi > matrix_mdev->matrix.aqm_max) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
                ret = count;
                goto done;
        }

        clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
        vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
        ret = count;

done:
        release_update_locks_for_mdev(matrix_mdev);
        return ret;
}
static DEVICE_ATTR_WO(unassign_domain);

/**
 * assign_control_domain_store - parses the domain ID from @buf and sets
 * the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's assign_control_domain attribute
 * @buf:   a buffer containing the domain ID to be assigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t assign_control_domain_store(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
{
        int ret;
        unsigned long id;
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

        get_update_locks_for_mdev(matrix_mdev);

        ret = kstrtoul(buf, 0, &id);
        if (ret)
                goto done;

        if (id > matrix_mdev->matrix.adm_max) {
                ret = -ENODEV;
                goto done;
        }

        if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
                ret = count;
                goto done;
        }

        /* Set the bit in the ADM (bitmask) corresponding to the AP control
         * domain number (id). The bits in the mask, from most significant to
         * least significant, correspond to IDs 0 up to the one less than the
         * number of control domains that can be assigned.
         */
        set_bit_inv(id, matrix_mdev->matrix.adm);
        if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);

        ret = count;
done:
        release_update_locks_for_mdev(matrix_mdev);
        return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);

/**
 * unassign_control_domain_store - parses the domain ID from @buf and
 * clears the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's unassign_control_domain attribute
 * @buf:   a buffer containing the domain ID to be unassigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t unassign_control_domain_store(struct device *dev,
                                             struct device_attribute *attr,
                                             const char *buf, size_t count)
{
        int ret;
        unsigned long domid;
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

        get_update_locks_for_mdev(matrix_mdev);

        ret = kstrtoul(buf, 0, &domid);
        if (ret)
                goto done;

        if (domid > matrix_mdev->matrix.adm_max) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
                ret = count;
                goto done;
        }

        clear_bit_inv(domid, matrix_mdev->matrix.adm);

        if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
                clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);
        }

        ret = count;
done:
        release_update_locks_for_mdev(matrix_mdev);
        return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);

static ssize_t control_domains_show(struct device *dev,
                                    struct device_attribute *dev_attr,
                                    char *buf)
{
        unsigned long id;
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
        unsigned long max_domid = matrix_mdev->matrix.adm_max;
        int nchars = 0;

        mutex_lock(&matrix_dev->mdevs_lock);
        for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1)
                nchars += sysfs_emit_at(buf, nchars, "%04lx\n", id);
        mutex_unlock(&matrix_dev->mdevs_lock);

        return nchars;
}
static DEVICE_ATTR_RO(control_domains);

static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
        unsigned long apid;
        unsigned long apqi;
        unsigned long apid1;
        unsigned long apqi1;
        unsigned long napm_bits = matrix->apm_max + 1;
        unsigned long naqm_bits = matrix->aqm_max + 1;
        int nchars = 0;

        apid1 = find_first_bit_inv(matrix->apm, napm_bits);
        apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);

        if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
                for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
                        for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
                                nchars += sysfs_emit_at(buf, nchars, "%02lx.%04lx\n", apid, apqi);
                }
        } else if (apid1 < napm_bits) {
                for_each_set_bit_inv(apid, matrix->apm, napm_bits)
                        nchars += sysfs_emit_at(buf, nchars, "%02lx.\n", apid);
        } else if (apqi1 < naqm_bits) {
                for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
                        nchars += sysfs_emit_at(buf, nchars, ".%04lx\n", apqi);
        }

        return nchars;
}
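
/*
 * Example output (values invented for illustration): with adapters 04 and 05
 * and domain 0047 assigned, the matrix attribute reads "04.0047\n05.0047\n";
 * with only adapters assigned it reads "04.\n05.\n", and with only domains
 * assigned it reads ".0047\n".
 */
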
static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        ssize_t nchars;
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

        mutex_lock(&matrix_dev->mdevs_lock);
        nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
        mutex_unlock(&matrix_dev->mdevs_lock);

        return nchars;
}
static DEVICE_ATTR_RO(matrix);

static ssize_t guest_matrix_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        ssize_t nchars;
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

        mutex_lock(&matrix_dev->mdevs_lock);
        nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
        mutex_unlock(&matrix_dev->mdevs_lock);

        return nchars;
}
static DEVICE_ATTR_RO(guest_matrix);

static ssize_t write_ap_bitmap(unsigned long *bitmap, char *buf, int offset, char sep)
{
        return sysfs_emit_at(buf, offset, "0x%016lx%016lx%016lx%016lx%c",
                             bitmap[0], bitmap[1], bitmap[2], bitmap[3], sep);
}

static ssize_t ap_config_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
        int idx = 0;

        idx += write_ap_bitmap(matrix_mdev->matrix.apm, buf, idx, ',');
        idx += write_ap_bitmap(matrix_mdev->matrix.aqm, buf, idx, ',');
        idx += write_ap_bitmap(matrix_mdev->matrix.adm, buf, idx, '\n');

        return idx;
}

/* Number of characters needed for a complete hex mask representing the bits in .. */
#define AP_DEVICES_STRLEN	(AP_DEVICES / 4 + 3)
#define AP_DOMAINS_STRLEN	(AP_DOMAINS / 4 + 3)
#define AP_CONFIG_STRLEN	(AP_DEVICES_STRLEN + 2 * AP_DOMAINS_STRLEN)
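
/*
 * Sizing sketch (derived from write_ap_bitmap() above): a mask of n bits
 * needs n / 4 hex digits, plus 2 characters for the "0x" prefix and 1 for
 * the ',' or '\n' separator, hence the "/ 4 + 3". AP_CONFIG_STRLEN covers
 * the full "apm,aqm,adm\n" line emitted by ap_config_show() and parsed back
 * in by ap_config_store().
 */
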
static int parse_bitmap(char **strbufptr, unsigned long *bitmap, int nbits)
{
        char *curmask;

        curmask = strsep(strbufptr, ",\n");
        if (!curmask)
                return -EINVAL;

        bitmap_clear(bitmap, 0, nbits);
        return ap_hex2bitmap(curmask, bitmap, nbits);
}

static int ap_matrix_overflow_check(struct ap_matrix_mdev *matrix_mdev)
{
        unsigned long bit;

        for_each_set_bit_inv(bit, matrix_mdev->matrix.apm, AP_DEVICES) {
                if (bit > matrix_mdev->matrix.apm_max)
                        return -ENODEV;
        }

        for_each_set_bit_inv(bit, matrix_mdev->matrix.aqm, AP_DOMAINS) {
                if (bit > matrix_mdev->matrix.aqm_max)
                        return -ENODEV;
        }

        for_each_set_bit_inv(bit, matrix_mdev->matrix.adm, AP_DOMAINS) {
                if (bit > matrix_mdev->matrix.adm_max)
                        return -ENODEV;
        }

        return 0;
}

static void ap_matrix_copy(struct ap_matrix *dst, struct ap_matrix *src)
{
        /* This check works around false positive gcc -Wstringop-overread */
        if (!src)
                return;

        bitmap_copy(dst->apm, src->apm, AP_DEVICES);
        bitmap_copy(dst->aqm, src->aqm, AP_DOMAINS);
        bitmap_copy(dst->adm, src->adm, AP_DOMAINS);
}

static ssize_t ap_config_store(struct device *dev, struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
        struct ap_matrix m_new, m_old, m_added, m_removed;
        DECLARE_BITMAP(apm_filtered, AP_DEVICES);
        unsigned long newbit;
        char *newbuf, *rest;
        int rc = count;
        bool do_update;

        newbuf = kstrndup(buf, AP_CONFIG_STRLEN, GFP_KERNEL);
        if (!newbuf)
                return -ENOMEM;
        rest = newbuf;

        mutex_lock(&ap_perms_mutex);
        get_update_locks_for_mdev(matrix_mdev);

        /* Save old state */
        ap_matrix_copy(&m_old, &matrix_mdev->matrix);
        if (parse_bitmap(&rest, m_new.apm, AP_DEVICES) ||
            parse_bitmap(&rest, m_new.aqm, AP_DOMAINS) ||
            parse_bitmap(&rest, m_new.adm, AP_DOMAINS)) {
                rc = -EINVAL;
                goto out;
        }

        bitmap_andnot(m_removed.apm, m_old.apm, m_new.apm, AP_DEVICES);
        bitmap_andnot(m_removed.aqm, m_old.aqm, m_new.aqm, AP_DOMAINS);
        bitmap_andnot(m_added.apm, m_new.apm, m_old.apm, AP_DEVICES);
        bitmap_andnot(m_added.aqm, m_new.aqm, m_old.aqm, AP_DOMAINS);

        /* Need new bitmaps in matrix_mdev for validation */
        ap_matrix_copy(&matrix_mdev->matrix, &m_new);

        /* Ensure new state is valid, else undo new state */
        rc = vfio_ap_mdev_validate_masks(matrix_mdev);
        if (rc) {
                ap_matrix_copy(&matrix_mdev->matrix, &m_old);
                goto out;
        }
        rc = ap_matrix_overflow_check(matrix_mdev);
        if (rc) {
                ap_matrix_copy(&matrix_mdev->matrix, &m_old);
                goto out;
        }
        rc = count;

        /* Need old bitmaps in matrix_mdev for unplug/unlink */
        ap_matrix_copy(&matrix_mdev->matrix, &m_old);

        /* Unlink removed adapters/domains */
        vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, m_removed.apm);
        vfio_ap_mdev_hot_unplug_domains(matrix_mdev, m_removed.aqm);

        /* Need new bitmaps in matrix_mdev for linking new adapters/domains */
        ap_matrix_copy(&matrix_mdev->matrix, &m_new);

        /* Link newly added adapters */

static ssize_t ap_config_store(struct device *dev, struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	struct ap_matrix m_new, m_old, m_added, m_removed;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	unsigned long newbit;
	char *newbuf, *rest;
	int rc = count;
	bool do_update;

	newbuf = kstrndup(buf, AP_CONFIG_STRLEN, GFP_KERNEL);
	if (!newbuf)
		return -ENOMEM;
	rest = newbuf;

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	/* Save old state */
	ap_matrix_copy(&m_old, &matrix_mdev->matrix);
	if (parse_bitmap(&rest, m_new.apm, AP_DEVICES) ||
	    parse_bitmap(&rest, m_new.aqm, AP_DOMAINS) ||
	    parse_bitmap(&rest, m_new.adm, AP_DOMAINS)) {
		rc = -EINVAL;
		goto out;
	}

	bitmap_andnot(m_removed.apm, m_old.apm, m_new.apm, AP_DEVICES);
	bitmap_andnot(m_removed.aqm, m_old.aqm, m_new.aqm, AP_DOMAINS);
	bitmap_andnot(m_added.apm, m_new.apm, m_old.apm, AP_DEVICES);
	bitmap_andnot(m_added.aqm, m_new.aqm, m_old.aqm, AP_DOMAINS);

	/* Need new bitmaps in matrix_mdev for validation */
	ap_matrix_copy(&matrix_mdev->matrix, &m_new);

	/* Ensure new state is valid, else undo new state */
	rc = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (rc) {
		ap_matrix_copy(&matrix_mdev->matrix, &m_old);
		goto out;
	}
	rc = ap_matrix_overflow_check(matrix_mdev);
	if (rc) {
		ap_matrix_copy(&matrix_mdev->matrix, &m_old);
		goto out;
	}
	rc = count;

	/* Need old bitmaps in matrix_mdev for unplug/unlink */
	ap_matrix_copy(&matrix_mdev->matrix, &m_old);

	/* Unlink removed adapters/domains */
	vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, m_removed.apm);
	vfio_ap_mdev_hot_unplug_domains(matrix_mdev, m_removed.aqm);

	/* Need new bitmaps in matrix_mdev for linking new adapters/domains */
	ap_matrix_copy(&matrix_mdev->matrix, &m_new);

	/* Link newly added adapters */
	for_each_set_bit_inv(newbit, m_added.apm, AP_DEVICES)
		vfio_ap_mdev_link_adapter(matrix_mdev, newbit);

	/* Link newly added domains */
	for_each_set_bit_inv(newbit, m_added.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_domain(matrix_mdev, newbit);

	/* filter resources not bound to vfio-ap */
	do_update = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
	do_update |= vfio_ap_mdev_filter_cdoms(matrix_mdev);

	/* Apply changes to shadow apcb if things changed */
	if (do_update) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}
out:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);
	kfree(newbuf);
	return rc;
}
static DEVICE_ATTR_RW(ap_config);

static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_ap_config.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	&dev_attr_guest_matrix.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};

static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};

/**
 * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
 * to manage AP resources for the guest whose state is represented by @kvm
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Return: 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
 */
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
				struct kvm *kvm)
{
	struct ap_matrix_mdev *m;

	if (kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
			if (m != matrix_mdev && m->kvm == kvm) {
				release_update_locks_for_kvm(kvm);
				return -EPERM;
			}
		}

		kvm_get_kvm(kvm);
		matrix_mdev->kvm = kvm;
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

		release_update_locks_for_kvm(kvm);
	}

	return 0;
}

static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length)
{
	struct ap_queue_table *qtable = &matrix_mdev->qtable;
	struct vfio_ap_queue *q;
	int loop_cursor;

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		if (q->saved_iova >= iova && q->saved_iova < iova + length)
			vfio_ap_irq_disable(q);
	}
}

static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
				   u64 length)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	mutex_lock(&matrix_dev->mdevs_lock);

	unmap_iova(matrix_mdev, iova, length);

	mutex_unlock(&matrix_dev->mdevs_lock);
}
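
/*
 * Note: the check in unmap_iova() above is against the half-open range
 * [iova, iova + length); for example, an unmap of iova 0x4000 with length
 * 0x2000 disables the IRQ of any queue whose saved_iova lies between
 * 0x4000 and 0x5fff inclusive.
 */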

/**
 * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
 * by @matrix_mdev.
 *
 * @matrix_mdev: a matrix mediated device
 */
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
	struct kvm *kvm = matrix_mdev->kvm;

	if (kvm && kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = NULL;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		kvm_arch_crypto_clear_masks(kvm);
		vfio_ap_mdev_reset_queues(matrix_mdev);
		kvm_put_kvm(kvm);
		matrix_mdev->kvm = NULL;

		release_update_locks_for_kvm(kvm);
	}
}

static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
	struct ap_queue *queue;
	struct vfio_ap_queue *q = NULL;

	queue = ap_get_qdev(apqn);
	if (!queue)
		return NULL;

	if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
		q = dev_get_drvdata(&queue->ap_dev.device);

	put_device(&queue->ap_dev.device);

	return q;
}

static int apq_status_check(int apqn, struct ap_queue_status *status)
{
	switch (status->response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		return 0;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
		return -EBUSY;
	case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
	case AP_RESPONSE_ASSOC_FAILED:
		/*
		 * These asynchronous response codes indicate a PQAP(AAPQ)
		 * instruction to associate a secret with the guest failed. All
		 * subsequent AP instructions will end with the asynchronous
		 * response code until the AP queue is reset; so, let's return
		 * a value indicating a reset needs to be performed again.
		 */
		return -EAGAIN;
	default:
		WARN(true,
		     "failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
		     AP_QID_CARD(apqn), AP_QID_QUEUE(apqn),
		     status->response_code);
		return -EIO;
	}
}

#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)"

static void apq_reset_check(struct work_struct *reset_work)
{
	int ret = -EBUSY, elapsed = 0;
	struct ap_queue_status status;
	struct vfio_ap_queue *q;

	q = container_of(reset_work, struct vfio_ap_queue, reset_work);
	memcpy(&status, &q->reset_status, sizeof(status));
	while (true) {
		msleep(AP_RESET_INTERVAL);
		elapsed += AP_RESET_INTERVAL;
		status = ap_tapq(q->apqn, NULL);
		ret = apq_status_check(q->apqn, &status);
		if (ret == -EIO)
			return;
		if (ret == -EBUSY) {
			pr_notice_ratelimited(WAIT_MSG, elapsed,
					      AP_QID_CARD(q->apqn),
					      AP_QID_QUEUE(q->apqn),
					      status.response_code,
					      status.queue_empty,
					      status.irq_enabled);
		} else {
			if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
			    q->reset_status.response_code == AP_RESPONSE_BUSY ||
			    q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
			    ret == -EAGAIN) {
				status = ap_zapq(q->apqn, 0);
				memcpy(&q->reset_status, &status, sizeof(status));
				continue;
			}

			if (q->saved_isc != VFIO_AP_ISC_INVALID)
				vfio_ap_free_aqic_resources(q);
			break;
		}
	}
}
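
/*
 * The reset protocol in outline: vfio_ap_mdev_reset_queue() below issues
 * the ZAPQ and, if the queue reports busy or reset-in-progress, defers
 * verification to apq_reset_check() above on system_long_wq, which polls
 * with TAPQ every AP_RESET_INTERVAL ms and re-issues the ZAPQ when the
 * saved reset status (or an -EAGAIN association failure) indicates another
 * attempt is needed. Callers synchronize via flush_work(&q->reset_work).
 */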

static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
{
	struct ap_queue_status status;

	if (!q)
		return;
	status = ap_zapq(q->apqn, 0);
	memcpy(&q->reset_status, &status, sizeof(status));
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		/*
		 * Let's verify whether the ZAPQ completed successfully on a
		 * work queue.
		 */
		queue_work(system_long_wq, &q->reset_work);
		break;
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		vfio_ap_free_aqic_resources(q);
		break;
	default:
		WARN(true,
		     "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
		     AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
		     status.response_code);
	}
}

static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
{
	int ret = 0, loop_cursor;
	struct vfio_ap_queue *q;

	hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
		vfio_ap_mdev_reset_queue(q);

	hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
		flush_work(&q->reset_work);

		if (q->reset_status.response_code)
			ret = -EIO;
	}

	return ret;
}

static int vfio_ap_mdev_reset_qlist(struct list_head *qlist)
{
	int ret = 0;
	struct vfio_ap_queue *q;

	list_for_each_entry(q, qlist, reset_qnode)
		vfio_ap_mdev_reset_queue(q);

	list_for_each_entry(q, qlist, reset_qnode) {
		flush_work(&q->reset_work);

		if (q->reset_status.response_code)
			ret = -EIO;
	}

	return ret;
}

static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	if (!vdev->kvm)
		return -EINVAL;

	return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}

static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	vfio_ap_mdev_unset_kvm(matrix_mdev);
}

static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
{
	struct device *dev = vdev->dev;
	struct ap_matrix_mdev *matrix_mdev;

	matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);

	get_update_locks_for_mdev(matrix_mdev);

	if (matrix_mdev->kvm) {
		kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
		signal_guest_ap_cfg_changed(matrix_mdev);
	}

	if (matrix_mdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(dev,
					       "Relaying device request to user (#%u)\n",
					       count);

		eventfd_signal(matrix_mdev->req_trigger);
	} else if (count == 0) {
		dev_notice(dev,
			   "No device request registered, blocked until released by user\n");
	}

	release_update_locks_for_mdev(matrix_mdev);
}

static int vfio_ap_mdev_get_device_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_device_info info;

	minsz = offsetofend(struct vfio_device_info, num_irqs);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
	info.num_regions = 0;
	info.num_irqs = VFIO_AP_NUM_IRQS;

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
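
/*
 * A minimal userspace sketch of the ioctl above (illustrative only; the
 * device fd is assumed to have been obtained via the VFIO group or cdev
 * interface):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	if (!ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info) &&
 *	    (info.flags & VFIO_DEVICE_FLAGS_AP))
 *		printf("AP device with %u IRQ indexes\n", info.num_irqs);
 */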

static ssize_t vfio_ap_get_irq_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_irq_info info;

	minsz = offsetofend(struct vfio_irq_info, count);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS)
		return -EINVAL;

	switch (info.index) {
	case VFIO_AP_REQ_IRQ_INDEX:
	case VFIO_AP_CFG_CHG_IRQ_INDEX:
		info.count = 1;
		info.flags = VFIO_IRQ_INFO_EVENTFD;
		break;
	default:
		return -EINVAL;
	}

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg)
{
	int ret;
	size_t data_size;
	unsigned long minsz;

	minsz = offsetofend(struct vfio_irq_set, count);

	if (copy_from_user(irq_set, (void __user *)arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS,
						 &data_size);
	if (ret)
		return ret;

	if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	return 0;
}

static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
				   unsigned long arg)
{
	s32 fd;
	void __user *data;
	unsigned long minsz;
	struct eventfd_ctx *req_trigger;

	minsz = offsetofend(struct vfio_irq_set, count);
	data = (void __user *)(arg + minsz);

	if (get_user(fd, (s32 __user *)data))
		return -EFAULT;

	if (fd == -1) {
		if (matrix_mdev->req_trigger)
			eventfd_ctx_put(matrix_mdev->req_trigger);
		matrix_mdev->req_trigger = NULL;
	} else if (fd >= 0) {
		req_trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(req_trigger))
			return PTR_ERR(req_trigger);

		if (matrix_mdev->req_trigger)
			eventfd_ctx_put(matrix_mdev->req_trigger);

		matrix_mdev->req_trigger = req_trigger;
	} else {
		return -EINVAL;
	}

	return 0;
}

static int vfio_ap_set_cfg_change_irq(struct ap_matrix_mdev *matrix_mdev, unsigned long arg)
{
	s32 fd;
	void __user *data;
	unsigned long minsz;
	struct eventfd_ctx *cfg_chg_trigger;

	minsz = offsetofend(struct vfio_irq_set, count);
	data = (void __user *)(arg + minsz);

	if (get_user(fd, (s32 __user *)data))
		return -EFAULT;

	if (fd == -1) {
		if (matrix_mdev->cfg_chg_trigger)
			eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);
		matrix_mdev->cfg_chg_trigger = NULL;
	} else if (fd >= 0) {
		cfg_chg_trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(cfg_chg_trigger))
			return PTR_ERR(cfg_chg_trigger);

		if (matrix_mdev->cfg_chg_trigger)
			eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);

		matrix_mdev->cfg_chg_trigger = cfg_chg_trigger;
	} else {
		return -EINVAL;
	}

	return 0;
}
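
/*
 * A userspace sketch (illustrative only) of registering an eventfd for the
 * request IRQ via VFIO_DEVICE_SET_IRQS; the payload is a struct
 * vfio_irq_set immediately followed by the signed 32-bit eventfd
 * descriptor read by vfio_ap_set_request_irq() above:
 *
 *	struct {
 *		struct vfio_irq_set irq_set;
 *		int32_t fd;
 *	} req = {
 *		.irq_set = {
 *			.argsz = sizeof(req),
 *			.flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *				 VFIO_IRQ_SET_ACTION_TRIGGER,
 *			.index = VFIO_AP_REQ_IRQ_INDEX,
 *			.start = 0,
 *			.count = 1,
 *		},
 *		.fd = eventfd(0, EFD_CLOEXEC),
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &req);
 *
 * Passing an fd of -1 unregisters the trigger.
 */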

static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
			    unsigned long arg)
{
	int ret;
	struct vfio_irq_set irq_set;

	ret = vfio_ap_irq_set_init(&irq_set, arg);
	if (ret)
		return ret;

	switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_EVENTFD:
		switch (irq_set.index) {
		case VFIO_AP_REQ_IRQ_INDEX:
			return vfio_ap_set_request_irq(matrix_mdev, arg);
		case VFIO_AP_CFG_CHG_IRQ_INDEX:
			return vfio_ap_set_cfg_change_irq(matrix_mdev, arg);
		default:
			return -EINVAL;
		}
	default:
		return -EINVAL;
	}
}

static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
				  unsigned int cmd, unsigned long arg)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);
	int ret;

	mutex_lock(&matrix_dev->mdevs_lock);
	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		ret = vfio_ap_mdev_get_device_info(arg);
		break;
	case VFIO_DEVICE_RESET:
		ret = vfio_ap_mdev_reset_queues(matrix_mdev);
		break;
	case VFIO_DEVICE_GET_IRQ_INFO:
		ret = vfio_ap_get_irq_info(arg);
		break;
	case VFIO_DEVICE_SET_IRQS:
		ret = vfio_ap_set_irqs(matrix_mdev, arg);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return ret;
}

static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
{
	struct ap_matrix_mdev *matrix_mdev;
	unsigned long apid = AP_QID_CARD(q->apqn);
	unsigned long apqi = AP_QID_QUEUE(q->apqn);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
		    test_bit_inv(apqi, matrix_mdev->matrix.aqm))
			return matrix_mdev;
	}

	return NULL;
}

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars = 0;
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;
	struct ap_matrix_mdev *matrix_mdev;
	struct ap_device *apdev = to_ap_dev(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	q = dev_get_drvdata(&apdev->device);
	matrix_mdev = vfio_ap_mdev_for_queue(q);

	/* If the queue is assigned to the matrix mediated device, then
	 * determine whether it is passed through to a guest; otherwise,
	 * indicate that it is unassigned.
	 */
	if (matrix_mdev) {
		apid = AP_QID_CARD(q->apqn);
		apqi = AP_QID_QUEUE(q->apqn);
		/*
		 * If the queue is passed through to the guest, then indicate
		 * that it is in use; otherwise, indicate that it is
		 * merely assigned to a matrix mediated device.
		 */
		if (matrix_mdev->kvm &&
		    test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
			nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_IN_USE);
		else
			nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_ASSIGNED);
	} else {
		nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_UNASSIGNED);
	}

	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}

static DEVICE_ATTR_RO(status);

static struct attribute *vfio_queue_attrs[] = {
	&dev_attr_status.attr,
	NULL,
};

static const struct attribute_group vfio_queue_attr_group = {
	.attrs = vfio_queue_attrs,
};
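
/*
 * For illustration (hypothetical APQN 04.0047), the status attribute of a
 * queue bound to the vfio_ap driver can be read from the shell:
 *
 *	$ cat /sys/bus/ap/devices/04.0047/status
 *	in use
 *
 * "unassigned": the queue is not assigned to any mdev; "assigned": it is
 * assigned to an mdev but not plugged into a guest; "in use": it is passed
 * through to a running guest.
 */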

static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
	.init = vfio_ap_mdev_init_dev,
	.open_device = vfio_ap_mdev_open_device,
	.close_device = vfio_ap_mdev_close_device,
	.ioctl = vfio_ap_mdev_ioctl,
	.dma_unmap = vfio_ap_mdev_dma_unmap,
	.bind_iommufd = vfio_iommufd_emulated_bind,
	.unbind_iommufd = vfio_iommufd_emulated_unbind,
	.attach_ioas = vfio_iommufd_emulated_attach_ioas,
	.detach_ioas = vfio_iommufd_emulated_detach_ioas,
	.request = vfio_ap_mdev_request
};

static struct mdev_driver vfio_ap_matrix_driver = {
	.device_api = VFIO_DEVICE_API_AP_STRING,
	.max_instances = MAX_ZDEV_ENTRIES_EXT,
	.driver = {
		.name = "vfio_ap_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = vfio_ap_mdev_attr_groups,
	},
	.probe = vfio_ap_mdev_probe,
	.remove = vfio_ap_mdev_remove,
};

int vfio_ap_mdev_register(void)
{
	int ret;

	ret = mdev_register_driver(&vfio_ap_matrix_driver);
	if (ret)
		return ret;

	matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
	matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
	matrix_dev->mdev_types = &matrix_dev->mdev_type;
	ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
				   &vfio_ap_matrix_driver,
				   &matrix_dev->mdev_types, 1);
	if (ret)
		goto err_driver;
	return 0;

err_driver:
	mdev_unregister_driver(&vfio_ap_matrix_driver);
	return ret;
}

void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_parent(&matrix_dev->parent);
	mdev_unregister_driver(&vfio_ap_matrix_driver);
}

int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
{
	int ret;
	struct vfio_ap_queue *q;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev;

	ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
	if (ret)
		return ret;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q) {
		ret = -ENOMEM;
		goto err_remove_group;
	}

	q->apqn = to_ap_queue(&apdev->device)->qid;
	q->saved_isc = VFIO_AP_ISC_INVALID;
	memset(&q->reset_status, 0, sizeof(q->reset_status));
	INIT_WORK(&q->reset_work, apq_reset_check);
	matrix_mdev = get_update_locks_by_apqn(q->apqn);

	if (matrix_mdev) {
		vfio_ap_mdev_link_queue(matrix_mdev, q);

		/*
		 * If we're in the process of handling the adding of adapters or
		 * domains to the host's AP configuration, then let the
		 * vfio_ap device driver's on_scan_complete callback filter the
		 * matrix and update the guest's AP configuration after all of
		 * the new queue devices are probed.
		 */
		if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
		    !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
			goto done;

		if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
			reset_queues_for_apids(matrix_mdev, apm_filtered);
		}
	}

done:
	dev_set_drvdata(&apdev->device, q);
	release_update_locks_for_mdev(matrix_mdev);

	return ret;

err_remove_group:
	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
	return ret;
}

void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
{
	unsigned long apid, apqi;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev;

	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
	q = dev_get_drvdata(&apdev->device);
	get_update_locks_for_queue(q);
	matrix_mdev = q->matrix_mdev;
	apid = AP_QID_CARD(q->apqn);
	apqi = AP_QID_QUEUE(q->apqn);

	if (matrix_mdev) {
		/* If the queue is assigned to the guest's AP configuration */
		if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
			/*
			 * Since the queues are defined via a matrix of adapters
			 * and domains, it is not possible to hot unplug a
			 * single queue; so, let's unplug the adapter.
			 */
			clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
			reset_queues_for_apid(matrix_mdev, apid);
			goto done;
		}
	}

	/*
	 * If the queue is not in the host's AP configuration, then resetting
	 * it will fail with response code 01 (APQN not valid); so, let's make
	 * sure it is in the host's config.
	 */
	if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
	    test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
		vfio_ap_mdev_reset_queue(q);
		flush_work(&q->reset_work);
	}

done:
	if (matrix_mdev)
		vfio_ap_unlink_queue_fr_mdev(q);

	dev_set_drvdata(&apdev->device, NULL);
	kfree(q);
	release_update_locks_for_mdev(matrix_mdev);
}

/**
 * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
 *				 assigned to a mediated device under the control
 *				 of the vfio_ap device driver.
 *
 * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
 * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
 *
 * Return:
 *	* -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
 *	  assigned to a mediated device under the control of the vfio_ap
 *	  device driver.
 *	* Otherwise, return 0.
 */
int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
{
	int ret;

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);

	return ret;
}

/**
 * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
 *				 domains that have been removed from the host's
 *				 AP configuration from a guest.
 *
 * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
 * @aprem: the adapters that have been removed from the host's AP configuration
 * @aqrem: the domains that have been removed from the host's AP configuration
 * @cdrem: the control domains that have been removed from the host's AP
 *	   configuration.
 */
static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
					unsigned long *aprem,
					unsigned long *aqrem,
					unsigned long *cdrem)
{
	int do_hotplug = 0;

	if (!bitmap_empty(aprem, AP_DEVICES)) {
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
					    matrix_mdev->shadow_apcb.apm,
					    aprem, AP_DEVICES);
	}

	if (!bitmap_empty(aqrem, AP_DOMAINS)) {
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
					    matrix_mdev->shadow_apcb.aqm,
					    aqrem, AP_DOMAINS);
	}

	if (!bitmap_empty(cdrem, AP_DOMAINS))
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
					    matrix_mdev->shadow_apcb.adm,
					    cdrem, AP_DOMAINS);

	if (do_hotplug)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}

/**
 * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
 *			     domains and control domains that have been removed
 *			     from the host AP configuration and unplugs them
 *			     from those guests.
 *
 * @ap_remove: bitmap specifying which adapters have been removed from the host
 *	       config.
 * @aq_remove: bitmap specifying which domains have been removed from the host
 *	       config.
 * @cd_remove: bitmap specifying which control domains have been removed from
 *	       the host config.
 */
static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
				    unsigned long *aq_remove,
				    unsigned long *cd_remove)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);
	int do_remove = 0;

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		mutex_lock(&matrix_mdev->kvm->lock);
		mutex_lock(&matrix_dev->mdevs_lock);

		do_remove |= bitmap_and(aprem, ap_remove,
					matrix_mdev->matrix.apm,
					AP_DEVICES);
		do_remove |= bitmap_and(aqrem, aq_remove,
					matrix_mdev->matrix.aqm,
					AP_DOMAINS);
		do_remove |= bitmap_and(cdrem, cd_remove,
					matrix_mdev->matrix.adm,
					AP_DOMAINS);

		if (do_remove)
			vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
						    cdrem);

		mutex_unlock(&matrix_dev->mdevs_lock);
		mutex_unlock(&matrix_mdev->kvm->lock);
	}
}
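
/*
 * How the removed sets are derived (illustrative; one byte shown):
 * bitmap_andnot(dst, prev, cur, nbits) computes prev & ~cur, i.e. the bits
 * present in the previous host AP configuration but absent from the
 * current one:
 *
 *	prev apm:	1110 0000
 *	cur  apm:	1010 0000
 *	aprem:		0100 0000  (adapter 1 was removed)
 */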

/**
 * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
 *				control domains from the host AP configuration
 *				by unplugging them from the guests that are
 *				using them.
 * @cur_config_info: the current host AP configuration information
 * @prev_config_info: the previous host AP configuration information
 */
static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
				       struct ap_config_info *prev_config_info)
{
	int do_remove;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);

	do_remove = bitmap_andnot(aprem,
				  (unsigned long *)prev_config_info->apm,
				  (unsigned long *)cur_config_info->apm,
				  AP_DEVICES);
	do_remove |= bitmap_andnot(aqrem,
				   (unsigned long *)prev_config_info->aqm,
				   (unsigned long *)cur_config_info->aqm,
				   AP_DOMAINS);
	do_remove |= bitmap_andnot(cdrem,
				   (unsigned long *)prev_config_info->adm,
				   (unsigned long *)cur_config_info->adm,
				   AP_DOMAINS);

	if (do_remove)
		vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
}

/**
 * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
 *				 are older than AP type 10 (CEX4).
 * @apm: a bitmap of the APIDs to examine
 * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
 */
static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
{
	bool apid_cleared;
	struct ap_queue_status status;
	unsigned long apid, apqi;
	struct ap_tapq_hwinfo info;

	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
		apid_cleared = false;

		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
			status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
			switch (status.response_code) {
			/*
			 * According to the architecture, in each case
			 * below the queue's info should be filled.
			 */
			case AP_RESPONSE_NORMAL:
			case AP_RESPONSE_RESET_IN_PROGRESS:
			case AP_RESPONSE_DECONFIGURED:
			case AP_RESPONSE_CHECKSTOPPED:
			case AP_RESPONSE_BUSY:
				/*
				 * The vfio_ap device driver only
				 * supports CEX4 and newer adapters, so
				 * remove the APID if the adapter is
				 * older than a CEX4.
				 */
				if (info.at < AP_DEVICE_TYPE_CEX4) {
					clear_bit_inv(apid, apm);
					apid_cleared = true;
				}

				break;

			default:
				/*
				 * If we don't know the adapter type,
				 * clear its APID since it can't be
				 * determined whether the vfio_ap
				 * device driver supports it.
				 */
				clear_bit_inv(apid, apm);
				apid_cleared = true;
				break;
			}

			/*
			 * If we've already cleared the APID from the apm, there
			 * is no need to continue examining the remaining AP
			 * queues to determine the type of the adapter.
			 */
			if (apid_cleared)
				break;
		}
	}
}

/**
 * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
 *			  control domains that have been added to the host's
 *			  AP configuration for each matrix mdev to which they
 *			  are assigned.
 *
 * @apm_add: a bitmap specifying the adapters that have been added to the AP
 *	     configuration.
 * @aqm_add: a bitmap specifying the domains that have been added to the AP
 *	     configuration.
 * @adm_add: a bitmap specifying the control domains that have been added to the
 *	     AP configuration.
 */
static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
				 unsigned long *adm_add)
{
	struct ap_matrix_mdev *matrix_mdev;

	if (list_empty(&matrix_dev->mdev_list))
		return;

	vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		bitmap_and(matrix_mdev->apm_add,
			   matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
		bitmap_and(matrix_mdev->aqm_add,
			   matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
		bitmap_and(matrix_mdev->adm_add,
			   matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
	}
}

/**
 * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
 *			     control domains to the host AP configuration
 *			     by updating the bitmaps that specify what adapters,
 *			     domains and control domains have been added so they
 *			     can be hot plugged into the guest when the AP bus
 *			     scan completes (see vfio_ap_on_scan_complete
 *			     function).
 * @cur_config_info: the current AP configuration information
 * @prev_config_info: the previous AP configuration information
 */
static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
				    struct ap_config_info *prev_config_info)
{
	bool do_add;
	DECLARE_BITMAP(apm_add, AP_DEVICES);
	DECLARE_BITMAP(aqm_add, AP_DOMAINS);
	DECLARE_BITMAP(adm_add, AP_DOMAINS);

	do_add = bitmap_andnot(apm_add,
			       (unsigned long *)cur_config_info->apm,
			       (unsigned long *)prev_config_info->apm,
			       AP_DEVICES);
	do_add |= bitmap_andnot(aqm_add,
				(unsigned long *)cur_config_info->aqm,
				(unsigned long *)prev_config_info->aqm,
				AP_DOMAINS);
	do_add |= bitmap_andnot(adm_add,
				(unsigned long *)cur_config_info->adm,
				(unsigned long *)prev_config_info->adm,
				AP_DOMAINS);

	if (do_add)
		vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
}

/**
 * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
 *			    configuration.
 *
 * @cur_cfg_info: the current host AP configuration
 * @prev_cfg_info: the previous host AP configuration
 */
void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
			    struct ap_config_info *prev_cfg_info)
{
	if (!cur_cfg_info || !prev_cfg_info)
		return;

	mutex_lock(&matrix_dev->guests_lock);

	vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
	vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
	memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));

	mutex_unlock(&matrix_dev->guests_lock);
}

static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
{
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;

	mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);

	filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm,
					    matrix_mdev->apm_add, AP_DEVICES);
	filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm,
					   matrix_mdev->aqm_add, AP_DOMAINS);
	filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm,
					 matrix_mdev->adm_add, AP_DOMAINS);

	if (filter_adapters || filter_domains)
		do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);

	if (filter_cdoms)
		do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);

	if (do_hotplug)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	reset_queues_for_apids(matrix_mdev, apm_filtered);

	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_mdev->kvm->lock);
}

void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
			      struct ap_config_info *old_config_info)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
		    bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
		    bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
			continue;

		vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
		bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
		bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
		bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
	}

	mutex_unlock(&matrix_dev->guests_lock);
}