1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright IBM Corp. 2016, 2023
4 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
5 *
6 * Adjunct processor bus, queue related code.
7 */
8
9 #define pr_fmt(fmt) "ap: " fmt
10
11 #include <linux/export.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <asm/facility.h>
15
16 #define CREATE_TRACE_POINTS
17 #include <asm/trace/ap.h>
18
19 #include "ap_bus.h"
20 #include "ap_debug.h"
21
22 EXPORT_TRACEPOINT_SYMBOL(s390_ap_nqap);
23 EXPORT_TRACEPOINT_SYMBOL(s390_ap_dqap);
24
25 static void __ap_flush_queue(struct ap_queue *aq);
26
27 /*
28 * some AP queue helper functions
29 */
30
ap_q_supported_in_se(struct ap_queue * aq)31 static inline bool ap_q_supported_in_se(struct ap_queue *aq)
32 {
33 return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
34 }
35
ap_q_supports_bind(struct ap_queue * aq)36 static inline bool ap_q_supports_bind(struct ap_queue *aq)
37 {
38 return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
39 }
40
ap_q_supports_assoc(struct ap_queue * aq)41 static inline bool ap_q_supports_assoc(struct ap_queue *aq)
42 {
43 return aq->card->hwinfo.ep11;
44 }
45
ap_q_needs_bind(struct ap_queue * aq)46 static inline bool ap_q_needs_bind(struct ap_queue *aq)
47 {
48 return ap_q_supports_bind(aq) && ap_sb_available();
49 }
50
/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on AP queue via ap_aqic(). Based on the return
 * value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 *
 * Return: 0 if interrupts were (or are being) enabled, -EPERM on an
 * asynchronous error, -EOPNOTSUPP if the queue is not available for
 * interrupt registration, -EBUSY if the operation should be retried.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	/* Request interruption mode on our interrupt subclass */
	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	/* async field set means an asynchronous error occurred */
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		/* Queue is gone or unusable - give up on irq registration */
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		/* Transient condition - caller may retry later */
		return -EBUSY;
	}
}
88
89 /**
90 * __ap_send(): Send message to adjunct processor queue.
91 * @qid: The AP queue number
92 * @psmid: The program supplied message identifier
93 * @msg: The message text
94 * @msglen: The message length
95 * @special: Special Bit
96 *
97 * Returns AP queue status structure.
98 * Condition code 1 on NQAP can't happen because the L bit is 1.
99 * Condition code 2 on NQAP also means the send is incomplete,
100 * because a segment boundary was reached. The NQAP is repeated.
101 */
102 static inline struct ap_queue_status
__ap_send(ap_qid_t qid,unsigned long psmid,void * msg,size_t msglen,int special)103 __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
104 int special)
105 {
106 struct ap_queue_status status;
107
108 if (special)
109 qid |= 0x400000UL;
110
111 status = ap_nqap(qid, psmid, msg, msglen);
112
113 trace_s390_ap_nqap(AP_QID_CARD(qid), AP_QID_QUEUE(qid),
114 status.value, psmid);
115
116 return status;
117 }
118
119 /* State machine definitions and helpers */
120
ap_sm_nop(struct ap_queue * aq)121 static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
122 {
123 return AP_SM_WAIT_NONE;
124 }
125
/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Return: the AP queue status as left behind by the last DQAP
 * invocation (note: NOT an enum ap_sm_wait value).
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	trace_s390_ap_dqap(AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
			   status.value, aq->reply->psmid);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     aq->reply->msg, aq->reply->len, false);
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		/* queue not empty but counter hit 0: keep it at least 1 */
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		/* find the pending request matching this reply's psmid */
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				/* reply came in parts, buffer only holds
				 * the last part - tell the receiver */
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
			 aq->pendingq_count, aq->requestq_count);
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}
206
/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	/* Without a reply buffer there is nothing we could receive into */
	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	/* async field set means an asynchronous error occurred */
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			/* more replies outstanding - poll again right away */
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		/* requests in flight: wait for irq or re-poll after timeout */
		if (aq->queue_count > 0)
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		/* unexpected response code - put the queue into error state */
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}
255
/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	/* nothing queued for sending */
	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg->msg, ap_msg->len, false);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	/* async field set means an asynchronous error occurred */
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		/* first request in flight arms the timeout timer */
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		/* request is now pending, waiting for its reply */
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		/* hardware can take more requests? then poll again */
		if (aq->queue_count < aq->card->hwinfo.qd + 1) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		/* queue depth reached - treat like a full queue */
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		/* this message can never be sent - fail it back to the
		 * requester and continue with the next one */
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		/* unexpected response code - put the queue into error state */
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
319
320 /**
321 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
322 * @aq: pointer to the AP queue
323 *
324 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
325 */
ap_sm_read_write(struct ap_queue * aq)326 static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
327 {
328 return min(ap_sm_read(aq), ap_sm_write(aq));
329 }
330
/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 *
 * Returns AP_SM_WAIT_LOW_TIMEOUT while the reset runs,
 * AP_SM_WAIT_NONE on (asynchronous) error.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	/* the F bit (aq->rapq_fbit) additionally releases SE associations */
	status = ap_rapq(aq->qid, aq->rapq_fbit);
	/* async field set means an asynchronous error occurred */
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		/* F bit is a one-shot argument, clear it after use */
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* unexpected response code - put the queue into error state */
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
359
/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	void *lsi_ptr;

	/* Get the status with TAPQ */
	status = ap_test_queue(aq->qid, 1, &hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* reset is done - refresh the SE bind state */
		aq->se_bstate = hwinfo.bs;
		/* if an irq indicator is available, try to (re)enable irqs */
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		/* reset still running - check back later */
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		/* queue gone or unexpected rc - put it into error state */
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
400
/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* replies still outstanding - poll again immediately */
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* unexpected response code - put the queue into error state */
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
440
/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *		       association request.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_LOW_TIMEOUT while the association is still
 * pending, AP_SM_WAIT_NONE when done or on error.
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* anything above BUSY is a hard TAPQ failure */
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* update queue's SE bind state */
	aq->se_bstate = hwinfo.bs;

	/* check bs bits */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* association is through */
		aq->sm_state = AP_SM_STATE_IDLE;
		pr_debug("queue 0x%02x.%04x associated with %u\n",
			 AP_QID_CARD(aq->qid),
			 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
496
/*
 * AP state machine jump table.
 * Indexed by [current state][event]; each entry is the handler
 * invoked by ap_sm_event() which returns the next wait requirement.
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		/* a timeout with requests in flight triggers a reset */
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};
530
ap_sm_event(struct ap_queue * aq,enum ap_sm_event event)531 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
532 {
533 if (aq->config && !aq->chkstop &&
534 aq->dev_state > AP_DEV_STATE_UNINITIATED)
535 return ap_jumptable[aq->sm_state][event](aq);
536 else
537 return AP_SM_WAIT_NONE;
538 }
539
ap_sm_event_loop(struct ap_queue * aq,enum ap_sm_event event)540 enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
541 {
542 enum ap_sm_wait wait;
543
544 while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
545 ;
546 return wait;
547 }
548
549 /*
550 * AP queue related attributes.
551 */
request_count_show(struct device * dev,struct device_attribute * attr,char * buf)552 static ssize_t request_count_show(struct device *dev,
553 struct device_attribute *attr,
554 char *buf)
555 {
556 struct ap_queue *aq = to_ap_queue(dev);
557 bool valid = false;
558 u64 req_cnt;
559
560 spin_lock_bh(&aq->lock);
561 if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
562 req_cnt = aq->total_request_count;
563 valid = true;
564 }
565 spin_unlock_bh(&aq->lock);
566
567 if (valid)
568 return sysfs_emit(buf, "%llu\n", req_cnt);
569 else
570 return sysfs_emit(buf, "-\n");
571 }
572
/*
 * Any write to the request_count attribute resets the total request
 * counter of this queue to zero; the written value is ignored.
 */
static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);
587
requestq_count_show(struct device * dev,struct device_attribute * attr,char * buf)588 static ssize_t requestq_count_show(struct device *dev,
589 struct device_attribute *attr, char *buf)
590 {
591 struct ap_queue *aq = to_ap_queue(dev);
592 unsigned int reqq_cnt = 0;
593
594 spin_lock_bh(&aq->lock);
595 if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
596 reqq_cnt = aq->requestq_count;
597 spin_unlock_bh(&aq->lock);
598 return sysfs_emit(buf, "%d\n", reqq_cnt);
599 }
600
601 static DEVICE_ATTR_RO(requestq_count);
602
pendingq_count_show(struct device * dev,struct device_attribute * attr,char * buf)603 static ssize_t pendingq_count_show(struct device *dev,
604 struct device_attribute *attr, char *buf)
605 {
606 struct ap_queue *aq = to_ap_queue(dev);
607 unsigned int penq_cnt = 0;
608
609 spin_lock_bh(&aq->lock);
610 if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
611 penq_cnt = aq->pendingq_count;
612 spin_unlock_bh(&aq->lock);
613 return sysfs_emit(buf, "%d\n", penq_cnt);
614 }
615
616 static DEVICE_ATTR_RO(pendingq_count);
617
reset_show(struct device * dev,struct device_attribute * attr,char * buf)618 static ssize_t reset_show(struct device *dev,
619 struct device_attribute *attr, char *buf)
620 {
621 struct ap_queue *aq = to_ap_queue(dev);
622 int rc = 0;
623
624 spin_lock_bh(&aq->lock);
625 switch (aq->sm_state) {
626 case AP_SM_STATE_RESET_START:
627 case AP_SM_STATE_RESET_WAIT:
628 rc = sysfs_emit(buf, "Reset in progress.\n");
629 break;
630 case AP_SM_STATE_WORKING:
631 case AP_SM_STATE_QUEUE_FULL:
632 rc = sysfs_emit(buf, "Reset Timer armed.\n");
633 break;
634 default:
635 rc = sysfs_emit(buf, "No Reset Timer set.\n");
636 }
637 spin_unlock_bh(&aq->lock);
638 return rc;
639 }
640
/*
 * Any write to the reset attribute flushes all messages from the
 * queue and restarts the queue state machine with a fresh reset;
 * the written value is ignored.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	/* fail back all queued and pending messages first */
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	/* kick the state machine and schedule the resulting wait */
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);
660
interrupt_show(struct device * dev,struct device_attribute * attr,char * buf)661 static ssize_t interrupt_show(struct device *dev,
662 struct device_attribute *attr, char *buf)
663 {
664 struct ap_queue *aq = to_ap_queue(dev);
665 struct ap_queue_status status;
666 int rc = 0;
667
668 spin_lock_bh(&aq->lock);
669 if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
670 rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
671 } else {
672 status = ap_tapq(aq->qid, NULL);
673 if (status.irq_enabled)
674 rc = sysfs_emit(buf, "Interrupts enabled.\n");
675 else
676 rc = sysfs_emit(buf, "Interrupts disabled.\n");
677 }
678 spin_unlock_bh(&aq->lock);
679
680 return rc;
681 }
682
683 static DEVICE_ATTR_RO(interrupt);
684
config_show(struct device * dev,struct device_attribute * attr,char * buf)685 static ssize_t config_show(struct device *dev,
686 struct device_attribute *attr, char *buf)
687 {
688 struct ap_queue *aq = to_ap_queue(dev);
689 int rc;
690
691 spin_lock_bh(&aq->lock);
692 rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
693 spin_unlock_bh(&aq->lock);
694 return rc;
695 }
696
697 static DEVICE_ATTR_RO(config);
698
chkstop_show(struct device * dev,struct device_attribute * attr,char * buf)699 static ssize_t chkstop_show(struct device *dev,
700 struct device_attribute *attr, char *buf)
701 {
702 struct ap_queue *aq = to_ap_queue(dev);
703 int rc;
704
705 spin_lock_bh(&aq->lock);
706 rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
707 spin_unlock_bh(&aq->lock);
708 return rc;
709 }
710
711 static DEVICE_ATTR_RO(chkstop);
712
ap_functions_show(struct device * dev,struct device_attribute * attr,char * buf)713 static ssize_t ap_functions_show(struct device *dev,
714 struct device_attribute *attr, char *buf)
715 {
716 struct ap_queue *aq = to_ap_queue(dev);
717 struct ap_queue_status status;
718 struct ap_tapq_hwinfo hwinfo;
719
720 status = ap_test_queue(aq->qid, 1, &hwinfo);
721 if (status.response_code > AP_RESPONSE_BUSY) {
722 pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
723 status.response_code,
724 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
725 return -EIO;
726 }
727
728 return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
729 }
730
731 static DEVICE_ATTR_RO(ap_functions);
732
/* Show the current driver override name, empty if none is set.
 * The guard() releases dev->driver_override.lock on function exit. */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	guard(spinlock)(&dev->driver_override.lock);
	return sysfs_emit(buf, "%s\n", dev->driver_override.name ?: "");
}
740
/*
 * Set or clear the driver override for this queue device and keep the
 * global ap_driver_override_ctr in sync. Rejected with -EINVAL while
 * the apmask/aqmask mechanism is in use, as both mechanisms would
 * conflict. Returns count on success or a negative errno.
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int rc = -EINVAL;
	bool old_value;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	/* Do not allow driver override if apmask/aqmask is in use */
	if (ap_apmask_aqmask_in_use)
		goto out;

	/* remember the previous override state to adjust the counter */
	old_value = device_has_driver_override(dev);
	rc = __device_set_driver_override(dev, buf, count);
	if (rc)
		goto out;
	if (old_value && !device_has_driver_override(dev))
		--ap_driver_override_ctr;
	else if (!old_value && device_has_driver_override(dev))
		++ap_driver_override_ctr;

	rc = count;

out:
	mutex_unlock(&ap_attr_mutex);
	return rc;
}

static DEVICE_ATTR_RW(driver_override);
772
#ifdef CONFIG_AP_DEBUG
/*
 * Show the device state and - for initialized devices - the state
 * machine state of this queue, e.g. "OPERATING [IDLE]".
 */
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		/* no sm state suffix follows, so terminate the line here */
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state (carries the terminating newline) */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

/*
 * Show the last error response code recorded for this queue as a
 * symbolic name, or the numeric value for unknown codes.
 */
static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	/* snapshot under the lock, emit without it */
	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif
875
/* sysfs attributes common to all AP queue devices */
static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
	&dev_attr_driver_override.attr,
#ifdef CONFIG_AP_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

/* device type for AP queue devices on the AP bus */
static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};
906
/*
 * Show the SE bind state of this queue: "bound", "unbound" or "-"
 * for queues which do not support the bind operation.
 */
static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* response codes above BUSY mean the hwinfo is not valid */
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}
938
/*
 * Bind (write "1") or unbind (write "0") this queue to/from the
 * secure execution guest. Unbind flushes the queue and triggers a
 * reset with the F bit set; bind checks the current bind state via
 * TAPQ, invokes BAPQ and verifies the resulting state.
 * Returns count on success or a negative errno.
 */
static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		/* restart the state machine; reset will carry the F bit */
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	/* a fresh bind voids any previous association */
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	/* all goto out paths above are taken with the lock held */
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_bind);
1034
/*
 * Show the SE association state of this queue: "associated <idx>",
 * "association pending", "unassociated", or "-" for queues which do
 * not support the associate operation.
 */
static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* response codes above BUSY mean the hwinfo is not valid */
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* a usable queue must have a valid association index */
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}
1073
/**
 * se_associate_store(): sysfs write triggering an SE association request.
 * @dev: the queue device
 * @attr: the device attribute (unused)
 * @buf: user input, parsed as an unsigned association index
 * @count: length of the input; returned on success
 *
 * Parses an association index, verifies via TAPQ that the queue is bound
 * but not yet associated (AP_BS_Q_USABLE_NO_SECURE_KEY), then issues the
 * asynchronous AAPQ instruction and pushes the state machine into
 * AP_SM_STATE_ASSOC_WAIT. Returns count on success or a negative errno.
 */
static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	/* ASSOC_IDX_INVALID is the sentinel; only values below it are valid */
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	/* lock held from here until 'out': guards se_bstate, sm_state, assoc_idx */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	/* association only makes sense on a bound queue without secure key */
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state: only start an association from a quiesced queue */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		/* remember the requested index and poll for completion */
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* sysfs store convention: report the whole input as consumed */
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
1141
static DEVICE_ATTR_RW(se_associate);

/*
 * Extra sysfs attribute group with the SE (secure execution) bind and
 * associate attributes. Only attached to queue devices when running as
 * an SE guest on hardware supporting it (see ap_queue_create()).
 */
static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};
1158
ap_queue_device_release(struct device * dev)1159 static void ap_queue_device_release(struct device *dev)
1160 {
1161 struct ap_queue *aq = to_ap_queue(dev);
1162
1163 spin_lock_bh(&ap_queues_lock);
1164 hash_del(&aq->hnode);
1165 spin_unlock_bh(&ap_queues_lock);
1166
1167 kfree(aq);
1168 }
1169
/*
 * Allocate and initialize a new AP queue device object for the given
 * qid, attached to card ac. Returns the new queue or NULL on allocation
 * failure. The device is not yet registered with the driver core.
 */
struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac)
{
	struct ap_queue *q;

	q = kzalloc_obj(*q);
	if (!q)
		return NULL;

	q->qid = qid;
	q->card = ac;
	q->ap_dev.device.release = ap_queue_device_release;
	q->ap_dev.device.type = &ap_queue_type;
	q->ap_dev.device_type = ac->ap_dev.device_type;
	/* in SE environment add bind/associate attributes group */
	if (ap_is_se_guest() && ap_q_supported_in_se(q))
		q->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;

	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->pendingq);
	INIT_LIST_HEAD(&q->requestq);
	timer_setup(&q->timeout, ap_request_timeout, 0);

	return q;
}
1192
ap_queue_init_reply(struct ap_queue * aq,struct ap_message * reply)1193 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
1194 {
1195 aq->reply = reply;
1196
1197 spin_lock_bh(&aq->lock);
1198 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
1199 spin_unlock_bh(&aq->lock);
1200 }
1201 EXPORT_SYMBOL(ap_queue_init_reply);
1202
1203 /**
1204 * ap_queue_message(): Queue a request to an AP device.
1205 * @aq: The AP device to queue the message to
1206 * @ap_msg: The message that is to be added
1207 */
ap_queue_message(struct ap_queue * aq,struct ap_message * ap_msg)1208 int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
1209 {
1210 int rc = 0;
1211
1212 /* msg needs to have a valid receive-callback */
1213 BUG_ON(!ap_msg->receive);
1214
1215 spin_lock_bh(&aq->lock);
1216
1217 /* only allow to queue new messages if device state is ok */
1218 if (aq->dev_state == AP_DEV_STATE_OPERATING) {
1219 list_add_tail(&ap_msg->list, &aq->requestq);
1220 aq->requestq_count++;
1221 aq->total_request_count++;
1222 atomic64_inc(&aq->card->total_request_count);
1223 } else {
1224 rc = -ENODEV;
1225 }
1226
1227 /* Send/receive as many request from the queue as possible. */
1228 ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
1229
1230 spin_unlock_bh(&aq->lock);
1231
1232 return rc;
1233 }
1234 EXPORT_SYMBOL(ap_queue_message);
1235
1236 /**
1237 * ap_queue_usable(): Check if queue is usable just now.
1238 * @aq: The AP queue device to test for usability.
1239 * This function is intended for the scheduler to query if it makes
1240 * sense to enqueue a message into this AP queue device by calling
1241 * ap_queue_message(). The perspective is very short-term as the
1242 * state machine and device state(s) may change at any time.
1243 */
ap_queue_usable(struct ap_queue * aq)1244 bool ap_queue_usable(struct ap_queue *aq)
1245 {
1246 bool rc = true;
1247
1248 spin_lock_bh(&aq->lock);
1249
1250 /* check for not configured or checkstopped */
1251 if (!aq->config || aq->chkstop) {
1252 rc = false;
1253 goto unlock_and_out;
1254 }
1255
1256 /* device state needs to be ok */
1257 if (aq->dev_state != AP_DEV_STATE_OPERATING) {
1258 rc = false;
1259 goto unlock_and_out;
1260 }
1261
1262 /* SE guest's queues additionally need to be bound */
1263 if (ap_is_se_guest()) {
1264 if (!ap_q_supported_in_se(aq)) {
1265 rc = false;
1266 goto unlock_and_out;
1267 }
1268 if (ap_q_needs_bind(aq) &&
1269 !(aq->se_bstate == AP_BS_Q_USABLE ||
1270 aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
1271 rc = false;
1272 }
1273
1274 unlock_and_out:
1275 spin_unlock_bh(&aq->lock);
1276 return rc;
1277 }
1278 EXPORT_SYMBOL(ap_queue_usable);
1279
1280 /**
1281 * ap_cancel_message(): Cancel a crypto request.
1282 * @aq: The AP device that has the message queued
1283 * @ap_msg: The message that is to be removed
1284 *
1285 * Cancel a crypto request. This is done by removing the request
1286 * from the device pending or request queue. Note that the
1287 * request stays on the AP queue. When it finishes the message
1288 * reply will be discarded because the psmid can't be found.
1289 */
ap_cancel_message(struct ap_queue * aq,struct ap_message * ap_msg)1290 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
1291 {
1292 struct ap_message *tmp;
1293
1294 spin_lock_bh(&aq->lock);
1295 if (!list_empty(&ap_msg->list)) {
1296 list_for_each_entry(tmp, &aq->pendingq, list)
1297 if (tmp->psmid == ap_msg->psmid) {
1298 aq->pendingq_count--;
1299 goto found;
1300 }
1301 aq->requestq_count--;
1302 found:
1303 list_del_init(&ap_msg->list);
1304 }
1305 spin_unlock_bh(&aq->lock);
1306 }
1307 EXPORT_SYMBOL(ap_cancel_message);
1308
1309 /**
1310 * __ap_flush_queue(): Flush requests.
1311 * @aq: Pointer to the AP queue
1312 *
1313 * Flush all requests from the request/pending queue of an AP device.
1314 */
__ap_flush_queue(struct ap_queue * aq)1315 static void __ap_flush_queue(struct ap_queue *aq)
1316 {
1317 struct ap_message *ap_msg, *next;
1318
1319 list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
1320 list_del_init(&ap_msg->list);
1321 aq->pendingq_count--;
1322 ap_msg->rc = -EAGAIN;
1323 ap_msg->receive(aq, ap_msg, NULL);
1324 }
1325 list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
1326 list_del_init(&ap_msg->list);
1327 aq->requestq_count--;
1328 ap_msg->rc = -EAGAIN;
1329 ap_msg->receive(aq, ap_msg, NULL);
1330 }
1331 aq->queue_count = 0;
1332 }
1333
ap_flush_queue(struct ap_queue * aq)1334 void ap_flush_queue(struct ap_queue *aq)
1335 {
1336 spin_lock_bh(&aq->lock);
1337 __ap_flush_queue(aq);
1338 spin_unlock_bh(&aq->lock);
1339 }
1340 EXPORT_SYMBOL(ap_flush_queue);
1341
ap_queue_prepare_remove(struct ap_queue * aq)1342 void ap_queue_prepare_remove(struct ap_queue *aq)
1343 {
1344 spin_lock_bh(&aq->lock);
1345 /* flush queue */
1346 __ap_flush_queue(aq);
1347 /* move queue device state to SHUTDOWN in progress */
1348 aq->dev_state = AP_DEV_STATE_SHUTDOWN;
1349 spin_unlock_bh(&aq->lock);
1350 timer_delete_sync(&aq->timeout);
1351 }
1352
ap_queue_remove(struct ap_queue * aq)1353 void ap_queue_remove(struct ap_queue *aq)
1354 {
1355 /*
1356 * all messages have been flushed and the device state
1357 * is SHUTDOWN. Now reset with zero which also clears
1358 * the irq registration and move the device state
1359 * to the initial value AP_DEV_STATE_UNINITIATED.
1360 */
1361 spin_lock_bh(&aq->lock);
1362 ap_zapq(aq->qid, 0);
1363 aq->dev_state = AP_DEV_STATE_UNINITIATED;
1364 spin_unlock_bh(&aq->lock);
1365 }
1366
_ap_queue_init_state(struct ap_queue * aq)1367 void _ap_queue_init_state(struct ap_queue *aq)
1368 {
1369 aq->dev_state = AP_DEV_STATE_OPERATING;
1370 aq->sm_state = AP_SM_STATE_RESET_START;
1371 aq->last_err_rc = 0;
1372 aq->assoc_idx = ASSOC_IDX_INVALID;
1373 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
1374 }
1375
ap_queue_init_state(struct ap_queue * aq)1376 void ap_queue_init_state(struct ap_queue *aq)
1377 {
1378 spin_lock_bh(&aq->lock);
1379 _ap_queue_init_state(aq);
1380 spin_unlock_bh(&aq->lock);
1381 }
1382 EXPORT_SYMBOL(ap_queue_init_state);
1383