// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/**
 * ap_queue_enable_interruption(): Enable interruption on an AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on AP queue via ap_aqic(). Based on the return
 * value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 */
static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
{
	struct ap_queue_status status;
	struct ap_qirq_ctrl qirqctrl = { 0 };

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, ind);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, length);
}

int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	if (msg == NULL)
		return -EINVAL;
	status = ap_dqap(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);
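
/*
 * Illustrative sketch only (not taken from an in-tree caller): ap_send() and
 * ap_recv() are thin exported wrappers around NQAP/DQAP for code that drives
 * an AP queue directly. Such a caller would retry on -EBUSY, treat -ENOENT
 * from ap_recv() as "no reply yet" and give up on -ENODEV:
 *
 *	rc = ap_send(qid, psmid, msg, msglen);
 *	if (!rc) {
 *		do {
 *			// back off between attempts in real code
 *			rc = ap_recv(qid, &psmid, reply, replylen);
 *		} while (rc == -ENOENT || rc == -EBUSY);
 *	}
 *
 * msglen, reply and replylen above are placeholders for this sketch.
 */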

/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status from the DQAP (dequeue) operation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	status = ap_dqap(aq->qid, &aq->reply->psmid,
			 aq->reply->msg, aq->reply->len);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count--;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		/* Find the pending request matching the psmid and deliver the reply. */
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			ap_msg->receive(aq, ap_msg, aq->reply);
			break;
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_INTERRUPT;
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or
 * AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;
	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
#ifdef CONFIG_ZCRYPT_DEBUG
	/* Fault injection: force an invalid qid for this request. */
	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
			    __func__, ap_msg->fi.cmd);
		qid = 0xFF00;
	}
#endif
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count++;
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return AP_SM_WAIT_INTERRUPT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the more urgent of the two wait hints returned by ap_sm_read()
 * and ap_sm_write().
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	/* min() works because enum ap_sm_wait is ordered from most to least urgent. */
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: pointer to the AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->interrupt = AP_INTR_DISABLED;
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_TIMEOUT
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_TIMEOUT
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = AP_INTR_ENABLED;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}
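
/*
 * Driving the state machine (informational sketch, mirroring the pattern
 * used by ap_queue_message() and reset_store() in this file): callers hold
 * aq->lock, feed an event in and hand the returned wait hint to ap_wait():
 *
 *	spin_lock_bh(&aq->lock);
 *	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
 *	spin_unlock_bh(&aq->lock);
 *
 * AP_SM_EVENT_POLL is the normal "make progress" event; AP_SM_EVENT_TIMEOUT
 * is meant for request-timeout handling and, for a working or full queue,
 * is mapped to ap_sm_reset() by the jump table above.
 */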

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
	else
		return scnprintf(buf, PAGE_SIZE, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
	       AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
	else if (aq->interrupt == AP_INTR_ENABLED)
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
	else
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [FULL]\n");
			break;
		default:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
	default:
		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	aq->qid = qid;
	aq->interrupt = AP_INTR_DISABLED;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}
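
/*
 * Rough lifecycle sketch (informational, derived only from the helpers in
 * this file): the bus creates the queue device with ap_queue_create() and
 * registers it; a message driver supplies a reply buffer via
 * ap_queue_init_reply() and starts the state machine with
 * ap_queue_init_state(); requests then flow through ap_queue_message() and
 * ap_cancel_message(). On removal, ap_queue_prepare_remove() flushes the
 * queues, moves the device state to SHUTDOWN and stops the timeout timer,
 * ap_queue_remove() resets/zeroes the queue via ap_zapq(), and the struct
 * is finally freed in ap_queue_device_release().
 */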

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 *
 * Returns 0 on success, or -ENODEV if the queue is not in the
 * AP_DEV_STATE_OPERATING state.
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
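
/*
 * Usage sketch (illustrative only; the struct ap_message fields match those
 * referenced in this file, the callback and buffer names are placeholders):
 *
 *	static void my_receive(struct ap_queue *aq, struct ap_message *msg,
 *			       struct ap_message *reply)
 *	{
 *		// reply may be NULL, e.g. on send error or flush; check msg->rc
 *	}
 *
 *	ap_msg->psmid = ...;			// program supplied message id
 *	ap_msg->msg = buf;
 *	ap_msg->len = buflen;
 *	ap_msg->flags = 0;			// or AP_MSG_FLAG_SPECIAL
 *	ap_msg->receive = my_receive;
 *	rc = ap_queue_message(aq, ap_msg);
 *	...
 *	ap_cancel_message(aq, ap_msg);		// if the caller gives up waiting
 */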

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the device state is
	 * SHUTDOWN. Now reset the queue with ap_zapq(), which also
	 * clears the irq registration, and move the device state back
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
