// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/*
 * some AP queue helper functions
 */

static inline bool ap_q_supported_in_se(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}

static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}

static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11;
}

static inline bool ap_q_needs_bind(struct ap_queue *aq)
{
	return ap_q_supports_bind(aq) && ap_sb_available();
}

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on AP queue via ap_aqic(). Based on the return
 * value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, msglen);
}

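/*
 * Illustrative sketch (comment only, not compiled code): a state machine
 * step typically sends the head of the request queue like this, with the
 * AP_MSG_FLAG_SPECIAL flag deciding whether the special-command bit is
 * set in the qid. See ap_sm_write() below for the real caller.
 *
 *	status = __ap_send(aq->qid, ap_msg->psmid, ap_msg->msg,
 *			   ap_msg->len,
 *			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
 *	if (status.response_code == AP_RESPONSE_NORMAL)
 *		list_move_tail(&ap_msg->list, &aq->pendingq);
 */
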
/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status from the final DQAP invocation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     aq->reply->msg, aq->reply->len, false);
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
			 aq->pendingq_count, aq->requestq_count);
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg->msg, ap_msg->len, false);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->hwinfo.qd) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	/*
	 * The ap_sm_wait enum is ordered from most to least urgent, so
	 * min() picks the more urgent wait hint of the two operations.
	 */
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_LOW_TIMEOUT
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	void *lsi_ptr;

	/* Get the status with TAPQ */
	status = ap_test_queue(aq->qid, 1, &hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->se_bstate = hwinfo.bs;
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_LOW_TIMEOUT
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *	association request.
 * @aq: pointer to the AP queue
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* update queue's SE bind state */
	aq->se_bstate = hwinfo.bs;

	/* check bs bits */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* association is through */
		aq->sm_state = AP_SM_STATE_IDLE;
		pr_debug("queue 0x%02x.%04x associated with %u\n",
			 AP_QID_CARD(aq->qid),
			 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};
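
/*
 * Illustrative sketch (comment only, not compiled code): dispatching an
 * event looks up the handler for the queue's current state and runs it,
 * e.g. a poll event on an idle queue ends up in ap_sm_write():
 *
 *	wait = ap_jumptable[AP_SM_STATE_IDLE][AP_SM_EVENT_POLL](aq);
 *
 * ap_sm_event() below wraps exactly this lookup in the config, chkstop
 * and dev_state guards.
 */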
514
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}
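
/*
 * Illustrative sketch (comment only, not compiled code): callers in this
 * file drive the state machine with the queue lock held and hand the
 * resulting wait hint to ap_wait(), e.g.:
 *
 *	spin_lock_bh(&aq->lock);
 *	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
 *	spin_unlock_bh(&aq->lock);
 */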
532
/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return sysfs_emit(buf, "%llu\n", req_cnt);
	else
		return sysfs_emit(buf, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = sysfs_emit(buf, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = sysfs_emit(buf, "Reset Timer armed.\n");
		break;
	default:
		rc = sysfs_emit(buf, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	} else {
		status = ap_tapq(aq->qid, NULL);
		if (status.irq_enabled)
			rc = sysfs_emit(buf, "Interrupts enabled.\n");
		else
			rc = sysfs_emit(buf, "Interrupts disabled.\n");
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}

static DEVICE_ATTR_RO(ap_functions);

#ifdef CONFIG_AP_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_AP_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_bind);

static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_associate);

static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->card = ac;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = ac->ap_dev.device_type;
	/* in SE environment add bind/associate attributes group */
	if (ap_is_se_guest() && ap_q_supported_in_se(aq))
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}
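
/*
 * Illustrative sketch (comment only, not compiled code): the AP bus scan
 * is the expected caller; roughly, it creates the queue device for a
 * detected APQN and then registers it with the driver core, e.g.:
 *
 *	aq = ap_queue_create(qid, ac);
 *	if (aq)
 *		... set device name/parent, then register the device ...
 */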
1135
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow queueing new messages if the device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
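
/*
 * Illustrative sketch (comment only, not compiled code): a crypto driver
 * typically initializes the message, sets a receive callback and then
 * enqueues it; the callback name here is only an example:
 *
 *	struct ap_message ap_msg;
 *
 *	ap_init_message(&ap_msg);
 *	ap_msg.msg = req_buf;
 *	ap_msg.len = req_len;
 *	ap_msg.receive = my_receive_cb;		(hypothetical callback)
 *	rc = ap_queue_message(aq, &ap_msg);
 */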
1178
/**
 * ap_queue_usable(): Check if queue is usable just now.
 * @aq: The AP queue device to test for usability.
 *
 * This function is intended for the scheduler to query if it makes
 * sense to enqueue a message into this AP queue device by calling
 * ap_queue_message(). The perspective is very short-term as the
 * state machine and device state(s) may change at any time.
 */
bool ap_queue_usable(struct ap_queue *aq)
{
	bool rc = true;

	spin_lock_bh(&aq->lock);

	/* check for not configured or checkstopped */
	if (!aq->config || aq->chkstop) {
		rc = false;
		goto unlock_and_out;
	}

	/* device state needs to be ok */
	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
		rc = false;
		goto unlock_and_out;
	}

	/* SE guest's queues additionally need to be bound */
	if (ap_is_se_guest()) {
		if (!ap_q_supported_in_se(aq)) {
			rc = false;
			goto unlock_and_out;
		}
		if (ap_q_needs_bind(aq) &&
		    !(aq->se_bstate == AP_BS_Q_USABLE ||
		      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
			rc = false;
	}

unlock_and_out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
EXPORT_SYMBOL(ap_queue_usable);
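
/*
 * Illustrative sketch (comment only, not compiled code): a scheduler
 * would typically gate its dispatch on this check, keeping in mind that
 * the answer can already be stale by the time ap_queue_message() runs:
 *
 *	if (ap_queue_usable(aq))
 *		rc = ap_queue_message(aq, ap_msg);
 */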
1222
/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	timer_delete_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
