// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"
19 static void __ap_flush_queue(struct ap_queue *aq);
/*
 * some AP queue helper functions
 */
25 static inline bool ap_q_supports_bind(struct ap_queue *aq)
27 return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
30 static inline bool ap_q_supports_assoc(struct ap_queue *aq)
32 return aq->card->hwinfo.ep11;
35 static inline bool ap_q_needs_bind(struct ap_queue *aq)
37 return ap_q_supports_bind(aq) && ap_sb_available();
41 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
43 * @ind: the notification indicator byte
45 * Enables interruption on AP queue via ap_aqic(). Based on the return
46 * value it waits a while and tests the AP queue if interrupts
47 * have been switched on using ap_test_queue().
49 static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
51 union ap_qirq_ctrl qirqctrl = { .value = 0 };
52 struct ap_queue_status status;
55 qirqctrl.isc = AP_ISC;
56 status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
59 switch (status.response_code) {
60 case AP_RESPONSE_NORMAL:
61 case AP_RESPONSE_OTHERWISE_CHANGED:
63 case AP_RESPONSE_Q_NOT_AVAIL:
64 case AP_RESPONSE_DECONFIGURED:
65 case AP_RESPONSE_CHECKSTOPPED:
66 case AP_RESPONSE_INVALID_ADDRESS:
67 pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
69 AP_QID_QUEUE(aq->qid));
71 case AP_RESPONSE_RESET_IN_PROGRESS:
72 case AP_RESPONSE_BUSY:
79 * __ap_send(): Send message to adjunct processor queue.
80 * @qid: The AP queue number
81 * @psmid: The program supplied message identifier
82 * @msg: The message text
83 * @msglen: The message length
84 * @special: Special Bit
86 * Returns AP queue status structure.
87 * Condition code 1 on NQAP can't happen because the L bit is 1.
88 * Condition code 2 on NQAP also means the send is incomplete,
89 * because a segment boundary was reached. The NQAP is repeated.
91 static inline struct ap_queue_status
92 __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
97 return ap_nqap(qid, psmid, msg, msglen);
100 /* State machine definitions and helpers */
102 static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
104 return AP_SM_WAIT_NONE;
108 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
109 * not change the state of the device.
110 * @aq: pointer to the AP queue
112 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
114 static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
116 struct ap_queue_status status;
117 struct ap_message *ap_msg;
120 unsigned long resgr0 = 0;
124 * DQAP loop until response code and resgr0 indicate that
125 * the msg is totally received. As we use the very same buffer
126 * the msg is overwritten with each invocation. That's intended
127 * and the receiver of the msg is informed with a msg rc code
128 * of EMSGSIZE in such a case.
131 status = ap_dqap(aq->qid, &aq->reply->psmid,
132 aq->reply->msg, aq->reply->bufsize,
133 &aq->reply->len, &reslen, &resgr0);
135 } while (status.response_code == 0xFF && resgr0 != 0);
137 switch (status.response_code) {
138 case AP_RESPONSE_NORMAL:
139 aq->queue_count = max_t(int, 0, aq->queue_count - 1);
140 if (!status.queue_empty && !aq->queue_count)
142 if (aq->queue_count > 0)
143 mod_timer(&aq->timeout,
144 jiffies + aq->request_timeout);
145 list_for_each_entry(ap_msg, &aq->pendingq, list) {
146 if (ap_msg->psmid != aq->reply->psmid)
148 list_del_init(&ap_msg->list);
149 aq->pendingq_count--;
151 ap_msg->rc = -EMSGSIZE;
152 ap_msg->receive(aq, ap_msg, NULL);
154 ap_msg->receive(aq, ap_msg, aq->reply);
160 AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
161 __func__, aq->reply->psmid,
162 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
165 case AP_RESPONSE_NO_PENDING_REPLY:
166 if (!status.queue_empty || aq->queue_count <= 0)
168 /* The card shouldn't forget requests but who knows. */
170 list_splice_init(&aq->pendingq, &aq->requestq);
171 aq->requestq_count += aq->pendingq_count;
172 aq->pendingq_count = 0;
181 * ap_sm_read(): Receive pending reply messages from an AP queue.
182 * @aq: pointer to the AP queue
184 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
186 static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
188 struct ap_queue_status status;
191 return AP_SM_WAIT_NONE;
192 status = ap_sm_recv(aq);
194 return AP_SM_WAIT_NONE;
195 switch (status.response_code) {
196 case AP_RESPONSE_NORMAL:
197 if (aq->queue_count > 0) {
198 aq->sm_state = AP_SM_STATE_WORKING;
199 return AP_SM_WAIT_AGAIN;
201 aq->sm_state = AP_SM_STATE_IDLE;
203 case AP_RESPONSE_NO_PENDING_REPLY:
204 if (aq->queue_count > 0)
205 return status.irq_enabled ?
206 AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
207 aq->sm_state = AP_SM_STATE_IDLE;
210 aq->dev_state = AP_DEV_STATE_ERROR;
211 aq->last_err_rc = status.response_code;
212 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
213 __func__, status.response_code,
214 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
215 return AP_SM_WAIT_NONE;
217 /* Check and maybe enable irq support (again) on this queue */
218 if (!status.irq_enabled && status.queue_empty) {
219 void *lsi_ptr = ap_airq_ptr();
221 if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
222 aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
223 return AP_SM_WAIT_AGAIN;
226 return AP_SM_WAIT_NONE;
230 * ap_sm_write(): Send messages from the request queue to an AP queue.
231 * @aq: pointer to the AP queue
233 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
235 static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
237 struct ap_queue_status status;
238 struct ap_message *ap_msg;
239 ap_qid_t qid = aq->qid;
241 if (aq->requestq_count <= 0)
242 return AP_SM_WAIT_NONE;
244 /* Start the next request on the queue. */
245 ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
246 status = __ap_send(qid, ap_msg->psmid,
247 ap_msg->msg, ap_msg->len,
248 ap_msg->flags & AP_MSG_FLAG_SPECIAL);
250 return AP_SM_WAIT_NONE;
251 switch (status.response_code) {
252 case AP_RESPONSE_NORMAL:
253 aq->queue_count = max_t(int, 1, aq->queue_count + 1);
254 if (aq->queue_count == 1)
255 mod_timer(&aq->timeout, jiffies + aq->request_timeout);
256 list_move_tail(&ap_msg->list, &aq->pendingq);
257 aq->requestq_count--;
258 aq->pendingq_count++;
259 if (aq->queue_count < aq->card->hwinfo.qd) {
260 aq->sm_state = AP_SM_STATE_WORKING;
261 return AP_SM_WAIT_AGAIN;
264 case AP_RESPONSE_Q_FULL:
265 aq->sm_state = AP_SM_STATE_QUEUE_FULL;
266 return status.irq_enabled ?
267 AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
268 case AP_RESPONSE_RESET_IN_PROGRESS:
269 aq->sm_state = AP_SM_STATE_RESET_WAIT;
270 return AP_SM_WAIT_LOW_TIMEOUT;
271 case AP_RESPONSE_INVALID_DOMAIN:
272 AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
274 case AP_RESPONSE_MESSAGE_TOO_BIG:
275 case AP_RESPONSE_REQ_FAC_NOT_INST:
276 list_del_init(&ap_msg->list);
277 aq->requestq_count--;
278 ap_msg->rc = -EINVAL;
279 ap_msg->receive(aq, ap_msg, NULL);
280 return AP_SM_WAIT_AGAIN;
282 aq->dev_state = AP_DEV_STATE_ERROR;
283 aq->last_err_rc = status.response_code;
284 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
285 __func__, status.response_code,
286 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
287 return AP_SM_WAIT_NONE;
292 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
293 * @aq: pointer to the AP queue
295 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
297 static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
299 return min(ap_sm_read(aq), ap_sm_write(aq));
303 * ap_sm_reset(): Reset an AP queue.
306 * Submit the Reset command to an AP queue.
308 static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
310 struct ap_queue_status status;
312 status = ap_rapq(aq->qid, aq->rapq_fbit);
314 return AP_SM_WAIT_NONE;
315 switch (status.response_code) {
316 case AP_RESPONSE_NORMAL:
317 case AP_RESPONSE_RESET_IN_PROGRESS:
318 aq->sm_state = AP_SM_STATE_RESET_WAIT;
320 return AP_SM_WAIT_LOW_TIMEOUT;
322 aq->dev_state = AP_DEV_STATE_ERROR;
323 aq->last_err_rc = status.response_code;
324 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
325 __func__, status.response_code,
326 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
327 return AP_SM_WAIT_NONE;
332 * ap_sm_reset_wait(): Test queue for completion of the reset operation
333 * @aq: pointer to the AP queue
335 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
337 static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
339 struct ap_queue_status status;
340 struct ap_tapq_hwinfo hwinfo;
343 /* Get the status with TAPQ */
344 status = ap_test_queue(aq->qid, 1, &hwinfo);
346 switch (status.response_code) {
347 case AP_RESPONSE_NORMAL:
348 aq->se_bstate = hwinfo.bs;
349 lsi_ptr = ap_airq_ptr();
350 if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
351 aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
353 aq->sm_state = (aq->queue_count > 0) ?
354 AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
355 return AP_SM_WAIT_AGAIN;
356 case AP_RESPONSE_BUSY:
357 case AP_RESPONSE_RESET_IN_PROGRESS:
358 return AP_SM_WAIT_LOW_TIMEOUT;
359 case AP_RESPONSE_Q_NOT_AVAIL:
360 case AP_RESPONSE_DECONFIGURED:
361 case AP_RESPONSE_CHECKSTOPPED:
363 aq->dev_state = AP_DEV_STATE_ERROR;
364 aq->last_err_rc = status.response_code;
365 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
366 __func__, status.response_code,
367 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
368 return AP_SM_WAIT_NONE;
373 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
374 * @aq: pointer to the AP queue
376 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
378 static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
380 struct ap_queue_status status;
382 if (aq->queue_count > 0 && aq->reply)
383 /* Try to read a completed message and get the status */
384 status = ap_sm_recv(aq);
386 /* Get the status with TAPQ */
387 status = ap_tapq(aq->qid, NULL);
389 if (status.irq_enabled == 1) {
390 /* Irqs are now enabled */
391 aq->sm_state = (aq->queue_count > 0) ?
392 AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
395 switch (status.response_code) {
396 case AP_RESPONSE_NORMAL:
397 if (aq->queue_count > 0)
398 return AP_SM_WAIT_AGAIN;
400 case AP_RESPONSE_NO_PENDING_REPLY:
401 return AP_SM_WAIT_LOW_TIMEOUT;
403 aq->dev_state = AP_DEV_STATE_ERROR;
404 aq->last_err_rc = status.response_code;
405 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
406 __func__, status.response_code,
407 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
408 return AP_SM_WAIT_NONE;
413 * ap_sm_assoc_wait(): Test queue for completion of a pending
414 * association request.
415 * @aq: pointer to the AP queue
417 static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
419 struct ap_queue_status status;
420 struct ap_tapq_hwinfo hwinfo;
422 status = ap_test_queue(aq->qid, 1, &hwinfo);
423 /* handle asynchronous error on this queue */
424 if (status.async && status.response_code) {
425 aq->dev_state = AP_DEV_STATE_ERROR;
426 aq->last_err_rc = status.response_code;
427 AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
428 __func__, status.response_code,
429 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
430 return AP_SM_WAIT_NONE;
432 if (status.response_code > AP_RESPONSE_BUSY) {
433 aq->dev_state = AP_DEV_STATE_ERROR;
434 aq->last_err_rc = status.response_code;
435 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
436 __func__, status.response_code,
437 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
438 return AP_SM_WAIT_NONE;
441 /* update queue's SE bind state */
442 aq->se_bstate = hwinfo.bs;
447 /* association is through */
448 aq->sm_state = AP_SM_STATE_IDLE;
449 AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
450 __func__, AP_QID_CARD(aq->qid),
451 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
452 return AP_SM_WAIT_NONE;
453 case AP_BS_Q_USABLE_NO_SECURE_KEY:
454 /* association still pending */
455 return AP_SM_WAIT_LOW_TIMEOUT;
457 /* reset from 'outside' happened or no idea at all */
458 aq->assoc_idx = ASSOC_IDX_INVALID;
459 aq->dev_state = AP_DEV_STATE_ERROR;
460 aq->last_err_rc = status.response_code;
461 AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
463 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
464 return AP_SM_WAIT_NONE;
/*
 * AP state machine jump table
 */
471 static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
472 [AP_SM_STATE_RESET_START] = {
473 [AP_SM_EVENT_POLL] = ap_sm_reset,
474 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
476 [AP_SM_STATE_RESET_WAIT] = {
477 [AP_SM_EVENT_POLL] = ap_sm_reset_wait,
478 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
480 [AP_SM_STATE_SETIRQ_WAIT] = {
481 [AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
482 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
484 [AP_SM_STATE_IDLE] = {
485 [AP_SM_EVENT_POLL] = ap_sm_write,
486 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
488 [AP_SM_STATE_WORKING] = {
489 [AP_SM_EVENT_POLL] = ap_sm_read_write,
490 [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
492 [AP_SM_STATE_QUEUE_FULL] = {
493 [AP_SM_EVENT_POLL] = ap_sm_read,
494 [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
496 [AP_SM_STATE_ASSOC_WAIT] = {
497 [AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
498 [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
502 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
504 if (aq->config && !aq->chkstop &&
505 aq->dev_state > AP_DEV_STATE_UNINITIATED)
506 return ap_jumptable[aq->sm_state][event](aq);
508 return AP_SM_WAIT_NONE;
511 enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
513 enum ap_sm_wait wait;
515 while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
/*
 * AP queue related attributes.
 */
523 static ssize_t request_count_show(struct device *dev,
524 struct device_attribute *attr,
527 struct ap_queue *aq = to_ap_queue(dev);
531 spin_lock_bh(&aq->lock);
532 if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
533 req_cnt = aq->total_request_count;
536 spin_unlock_bh(&aq->lock);
539 return sysfs_emit(buf, "%llu\n", req_cnt);
541 return sysfs_emit(buf, "-\n");
544 static ssize_t request_count_store(struct device *dev,
545 struct device_attribute *attr,
546 const char *buf, size_t count)
548 struct ap_queue *aq = to_ap_queue(dev);
550 spin_lock_bh(&aq->lock);
551 aq->total_request_count = 0;
552 spin_unlock_bh(&aq->lock);
557 static DEVICE_ATTR_RW(request_count);
559 static ssize_t requestq_count_show(struct device *dev,
560 struct device_attribute *attr, char *buf)
562 struct ap_queue *aq = to_ap_queue(dev);
563 unsigned int reqq_cnt = 0;
565 spin_lock_bh(&aq->lock);
566 if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
567 reqq_cnt = aq->requestq_count;
568 spin_unlock_bh(&aq->lock);
569 return sysfs_emit(buf, "%d\n", reqq_cnt);
572 static DEVICE_ATTR_RO(requestq_count);
574 static ssize_t pendingq_count_show(struct device *dev,
575 struct device_attribute *attr, char *buf)
577 struct ap_queue *aq = to_ap_queue(dev);
578 unsigned int penq_cnt = 0;
580 spin_lock_bh(&aq->lock);
581 if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
582 penq_cnt = aq->pendingq_count;
583 spin_unlock_bh(&aq->lock);
584 return sysfs_emit(buf, "%d\n", penq_cnt);
587 static DEVICE_ATTR_RO(pendingq_count);
589 static ssize_t reset_show(struct device *dev,
590 struct device_attribute *attr, char *buf)
592 struct ap_queue *aq = to_ap_queue(dev);
595 spin_lock_bh(&aq->lock);
596 switch (aq->sm_state) {
597 case AP_SM_STATE_RESET_START:
598 case AP_SM_STATE_RESET_WAIT:
599 rc = sysfs_emit(buf, "Reset in progress.\n");
601 case AP_SM_STATE_WORKING:
602 case AP_SM_STATE_QUEUE_FULL:
603 rc = sysfs_emit(buf, "Reset Timer armed.\n");
606 rc = sysfs_emit(buf, "No Reset Timer set.\n");
608 spin_unlock_bh(&aq->lock);
612 static ssize_t reset_store(struct device *dev,
613 struct device_attribute *attr,
614 const char *buf, size_t count)
616 struct ap_queue *aq = to_ap_queue(dev);
618 spin_lock_bh(&aq->lock);
619 __ap_flush_queue(aq);
620 aq->sm_state = AP_SM_STATE_RESET_START;
621 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
622 spin_unlock_bh(&aq->lock);
624 AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
625 __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
630 static DEVICE_ATTR_RW(reset);
632 static ssize_t interrupt_show(struct device *dev,
633 struct device_attribute *attr, char *buf)
635 struct ap_queue *aq = to_ap_queue(dev);
636 struct ap_queue_status status;
639 spin_lock_bh(&aq->lock);
640 if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
641 rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
643 status = ap_tapq(aq->qid, NULL);
644 if (status.irq_enabled)
645 rc = sysfs_emit(buf, "Interrupts enabled.\n");
647 rc = sysfs_emit(buf, "Interrupts disabled.\n");
649 spin_unlock_bh(&aq->lock);
654 static DEVICE_ATTR_RO(interrupt);
656 static ssize_t config_show(struct device *dev,
657 struct device_attribute *attr, char *buf)
659 struct ap_queue *aq = to_ap_queue(dev);
662 spin_lock_bh(&aq->lock);
663 rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
664 spin_unlock_bh(&aq->lock);
668 static DEVICE_ATTR_RO(config);
670 static ssize_t chkstop_show(struct device *dev,
671 struct device_attribute *attr, char *buf)
673 struct ap_queue *aq = to_ap_queue(dev);
676 spin_lock_bh(&aq->lock);
677 rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
678 spin_unlock_bh(&aq->lock);
682 static DEVICE_ATTR_RO(chkstop);
684 static ssize_t ap_functions_show(struct device *dev,
685 struct device_attribute *attr, char *buf)
687 struct ap_queue *aq = to_ap_queue(dev);
688 struct ap_queue_status status;
689 struct ap_tapq_hwinfo hwinfo;
691 status = ap_test_queue(aq->qid, 1, &hwinfo);
692 if (status.response_code > AP_RESPONSE_BUSY) {
693 AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
694 __func__, status.response_code,
695 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
699 return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
702 static DEVICE_ATTR_RO(ap_functions);
704 #ifdef CONFIG_ZCRYPT_DEBUG
705 static ssize_t states_show(struct device *dev,
706 struct device_attribute *attr, char *buf)
708 struct ap_queue *aq = to_ap_queue(dev);
711 spin_lock_bh(&aq->lock);
712 /* queue device state */
713 switch (aq->dev_state) {
714 case AP_DEV_STATE_UNINITIATED:
715 rc = sysfs_emit(buf, "UNINITIATED\n");
717 case AP_DEV_STATE_OPERATING:
718 rc = sysfs_emit(buf, "OPERATING");
720 case AP_DEV_STATE_SHUTDOWN:
721 rc = sysfs_emit(buf, "SHUTDOWN");
723 case AP_DEV_STATE_ERROR:
724 rc = sysfs_emit(buf, "ERROR");
727 rc = sysfs_emit(buf, "UNKNOWN");
729 /* state machine state */
731 switch (aq->sm_state) {
732 case AP_SM_STATE_RESET_START:
733 rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
735 case AP_SM_STATE_RESET_WAIT:
736 rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
738 case AP_SM_STATE_SETIRQ_WAIT:
739 rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
741 case AP_SM_STATE_IDLE:
742 rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
744 case AP_SM_STATE_WORKING:
745 rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
747 case AP_SM_STATE_QUEUE_FULL:
748 rc += sysfs_emit_at(buf, rc, " [FULL]\n");
750 case AP_SM_STATE_ASSOC_WAIT:
751 rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
754 rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
757 spin_unlock_bh(&aq->lock);
761 static DEVICE_ATTR_RO(states);
763 static ssize_t last_err_rc_show(struct device *dev,
764 struct device_attribute *attr, char *buf)
766 struct ap_queue *aq = to_ap_queue(dev);
769 spin_lock_bh(&aq->lock);
770 rc = aq->last_err_rc;
771 spin_unlock_bh(&aq->lock);
774 case AP_RESPONSE_NORMAL:
775 return sysfs_emit(buf, "NORMAL\n");
776 case AP_RESPONSE_Q_NOT_AVAIL:
777 return sysfs_emit(buf, "Q_NOT_AVAIL\n");
778 case AP_RESPONSE_RESET_IN_PROGRESS:
779 return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
780 case AP_RESPONSE_DECONFIGURED:
781 return sysfs_emit(buf, "DECONFIGURED\n");
782 case AP_RESPONSE_CHECKSTOPPED:
783 return sysfs_emit(buf, "CHECKSTOPPED\n");
784 case AP_RESPONSE_BUSY:
785 return sysfs_emit(buf, "BUSY\n");
786 case AP_RESPONSE_INVALID_ADDRESS:
787 return sysfs_emit(buf, "INVALID_ADDRESS\n");
788 case AP_RESPONSE_OTHERWISE_CHANGED:
789 return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
790 case AP_RESPONSE_Q_FULL:
791 return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
792 case AP_RESPONSE_INDEX_TOO_BIG:
793 return sysfs_emit(buf, "INDEX_TOO_BIG\n");
794 case AP_RESPONSE_NO_FIRST_PART:
795 return sysfs_emit(buf, "NO_FIRST_PART\n");
796 case AP_RESPONSE_MESSAGE_TOO_BIG:
797 return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
798 case AP_RESPONSE_REQ_FAC_NOT_INST:
799 return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
801 return sysfs_emit(buf, "response code %d\n", rc);
804 static DEVICE_ATTR_RO(last_err_rc);
807 static struct attribute *ap_queue_dev_attrs[] = {
808 &dev_attr_request_count.attr,
809 &dev_attr_requestq_count.attr,
810 &dev_attr_pendingq_count.attr,
811 &dev_attr_reset.attr,
812 &dev_attr_interrupt.attr,
813 &dev_attr_config.attr,
814 &dev_attr_chkstop.attr,
815 &dev_attr_ap_functions.attr,
816 #ifdef CONFIG_ZCRYPT_DEBUG
817 &dev_attr_states.attr,
818 &dev_attr_last_err_rc.attr,
823 static struct attribute_group ap_queue_dev_attr_group = {
824 .attrs = ap_queue_dev_attrs
827 static const struct attribute_group *ap_queue_dev_attr_groups[] = {
828 &ap_queue_dev_attr_group,
832 static struct device_type ap_queue_type = {
834 .groups = ap_queue_dev_attr_groups,
837 static ssize_t se_bind_show(struct device *dev,
838 struct device_attribute *attr, char *buf)
840 struct ap_queue *aq = to_ap_queue(dev);
841 struct ap_queue_status status;
842 struct ap_tapq_hwinfo hwinfo;
844 if (!ap_q_supports_bind(aq))
845 return sysfs_emit(buf, "-\n");
847 status = ap_test_queue(aq->qid, 1, &hwinfo);
848 if (status.response_code > AP_RESPONSE_BUSY) {
849 AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
850 __func__, status.response_code,
851 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
855 /* update queue's SE bind state */
856 spin_lock_bh(&aq->lock);
857 aq->se_bstate = hwinfo.bs;
858 spin_unlock_bh(&aq->lock);
862 case AP_BS_Q_USABLE_NO_SECURE_KEY:
863 return sysfs_emit(buf, "bound\n");
865 return sysfs_emit(buf, "unbound\n");
869 static ssize_t se_bind_store(struct device *dev,
870 struct device_attribute *attr,
871 const char *buf, size_t count)
873 struct ap_queue *aq = to_ap_queue(dev);
874 struct ap_queue_status status;
875 struct ap_tapq_hwinfo hwinfo;
879 if (!ap_q_supports_bind(aq))
882 /* only 0 (unbind) and 1 (bind) allowed */
883 rc = kstrtobool(buf, &value);
888 /* Unbind. Set F bit arg and trigger RAPQ */
889 spin_lock_bh(&aq->lock);
890 __ap_flush_queue(aq);
892 _ap_queue_init_state(aq);
897 /* Bind. Check current SE bind state */
898 status = ap_test_queue(aq->qid, 1, &hwinfo);
899 if (status.response_code) {
900 AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
901 __func__, status.response_code,
902 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
906 /* Update BS state */
907 spin_lock_bh(&aq->lock);
908 aq->se_bstate = hwinfo.bs;
909 if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
910 AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
912 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
918 if (aq->sm_state < AP_SM_STATE_IDLE) {
924 status = ap_bapq(aq->qid);
925 if (status.response_code) {
926 AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
927 __func__, status.response_code,
928 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
932 aq->assoc_idx = ASSOC_IDX_INVALID;
934 /* verify SE bind state */
935 status = ap_test_queue(aq->qid, 1, &hwinfo);
936 if (status.response_code) {
937 AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
938 __func__, status.response_code,
939 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
943 aq->se_bstate = hwinfo.bs;
944 if (!(hwinfo.bs == AP_BS_Q_USABLE ||
945 hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
946 AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
948 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
953 /* SE bind was successful */
954 AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
955 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
959 spin_unlock_bh(&aq->lock);
963 static DEVICE_ATTR_RW(se_bind);
965 static ssize_t se_associate_show(struct device *dev,
966 struct device_attribute *attr, char *buf)
968 struct ap_queue *aq = to_ap_queue(dev);
969 struct ap_queue_status status;
970 struct ap_tapq_hwinfo hwinfo;
972 if (!ap_q_supports_assoc(aq))
973 return sysfs_emit(buf, "-\n");
975 status = ap_test_queue(aq->qid, 1, &hwinfo);
976 if (status.response_code > AP_RESPONSE_BUSY) {
977 AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
978 __func__, status.response_code,
979 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
983 /* update queue's SE bind state */
984 spin_lock_bh(&aq->lock);
985 aq->se_bstate = hwinfo.bs;
986 spin_unlock_bh(&aq->lock);
990 if (aq->assoc_idx == ASSOC_IDX_INVALID) {
991 AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
994 return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
995 case AP_BS_Q_USABLE_NO_SECURE_KEY:
996 if (aq->assoc_idx != ASSOC_IDX_INVALID)
997 return sysfs_emit(buf, "association pending\n");
1000 return sysfs_emit(buf, "unassociated\n");
1004 static ssize_t se_associate_store(struct device *dev,
1005 struct device_attribute *attr,
1006 const char *buf, size_t count)
1008 struct ap_queue *aq = to_ap_queue(dev);
1009 struct ap_queue_status status;
1010 struct ap_tapq_hwinfo hwinfo;
1014 if (!ap_q_supports_assoc(aq))
1017 /* association index needs to be >= 0 */
1018 rc = kstrtouint(buf, 0, &value);
1021 if (value >= ASSOC_IDX_INVALID)
1024 /* check current SE bind state */
1025 status = ap_test_queue(aq->qid, 1, &hwinfo);
1026 if (status.response_code) {
1027 AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
1028 __func__, status.response_code,
1029 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
1032 spin_lock_bh(&aq->lock);
1033 aq->se_bstate = hwinfo.bs;
1034 if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
1035 AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
1036 __func__, hwinfo.bs,
1037 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
1042 /* check SM state */
1043 if (aq->sm_state != AP_SM_STATE_IDLE) {
1048 /* trigger the asynchronous association request */
1049 status = ap_aapq(aq->qid, value);
1050 switch (status.response_code) {
1051 case AP_RESPONSE_NORMAL:
1052 case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
1053 aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
1054 aq->assoc_idx = value;
1055 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
1058 AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
1059 __func__, status.response_code,
1060 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
1068 spin_unlock_bh(&aq->lock);
1072 static DEVICE_ATTR_RW(se_associate);
1074 static struct attribute *ap_queue_dev_sb_attrs[] = {
1075 &dev_attr_se_bind.attr,
1076 &dev_attr_se_associate.attr,
1080 static struct attribute_group ap_queue_dev_sb_attr_group = {
1081 .attrs = ap_queue_dev_sb_attrs
1084 static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
1085 &ap_queue_dev_sb_attr_group,
1089 static void ap_queue_device_release(struct device *dev)
1091 struct ap_queue *aq = to_ap_queue(dev);
1093 spin_lock_bh(&ap_queues_lock);
1094 hash_del(&aq->hnode);
1095 spin_unlock_bh(&ap_queues_lock);
1100 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
1102 struct ap_queue *aq;
1104 aq = kzalloc(sizeof(*aq), GFP_KERNEL);
1107 aq->ap_dev.device.release = ap_queue_device_release;
1108 aq->ap_dev.device.type = &ap_queue_type;
1109 aq->ap_dev.device_type = device_type;
1110 // add optional SE secure binding attributes group
1111 if (ap_sb_available() && is_prot_virt_guest())
1112 aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
1114 spin_lock_init(&aq->lock);
1115 INIT_LIST_HEAD(&aq->pendingq);
1116 INIT_LIST_HEAD(&aq->requestq);
1117 timer_setup(&aq->timeout, ap_request_timeout, 0);
1122 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
1126 spin_lock_bh(&aq->lock);
1127 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
1128 spin_unlock_bh(&aq->lock);
1130 EXPORT_SYMBOL(ap_queue_init_reply);
1133 * ap_queue_message(): Queue a request to an AP device.
1134 * @aq: The AP device to queue the message to
1135 * @ap_msg: The message that is to be added
1137 int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
1141 /* msg needs to have a valid receive-callback */
1142 BUG_ON(!ap_msg->receive);
1144 spin_lock_bh(&aq->lock);
1146 /* only allow to queue new messages if device state is ok */
1147 if (aq->dev_state == AP_DEV_STATE_OPERATING) {
1148 list_add_tail(&ap_msg->list, &aq->requestq);
1149 aq->requestq_count++;
1150 aq->total_request_count++;
1151 atomic64_inc(&aq->card->total_request_count);
1156 /* Send/receive as many request from the queue as possible. */
1157 ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
1159 spin_unlock_bh(&aq->lock);
1163 EXPORT_SYMBOL(ap_queue_message);
1166 * ap_queue_usable(): Check if queue is usable just now.
1167 * @aq: The AP queue device to test for usability.
1168 * This function is intended for the scheduler to query if it makes
1169 * sense to enqueue a message into this AP queue device by calling
1170 * ap_queue_message(). The perspective is very short-term as the
1171 * state machine and device state(s) may change at any time.
1173 bool ap_queue_usable(struct ap_queue *aq)
1177 spin_lock_bh(&aq->lock);
1179 /* check for not configured or checkstopped */
1180 if (!aq->config || aq->chkstop) {
1182 goto unlock_and_out;
1185 /* device state needs to be ok */
1186 if (aq->dev_state != AP_DEV_STATE_OPERATING) {
1188 goto unlock_and_out;
1191 /* SE guest's queues additionally need to be bound */
1192 if (ap_q_needs_bind(aq) &&
1193 !(aq->se_bstate == AP_BS_Q_USABLE ||
1194 aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
1198 spin_unlock_bh(&aq->lock);
1201 EXPORT_SYMBOL(ap_queue_usable);
1204 * ap_cancel_message(): Cancel a crypto request.
1205 * @aq: The AP device that has the message queued
1206 * @ap_msg: The message that is to be removed
1208 * Cancel a crypto request. This is done by removing the request
1209 * from the device pending or request queue. Note that the
1210 * request stays on the AP queue. When it finishes the message
1211 * reply will be discarded because the psmid can't be found.
1213 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
1215 struct ap_message *tmp;
1217 spin_lock_bh(&aq->lock);
1218 if (!list_empty(&ap_msg->list)) {
1219 list_for_each_entry(tmp, &aq->pendingq, list)
1220 if (tmp->psmid == ap_msg->psmid) {
1221 aq->pendingq_count--;
1224 aq->requestq_count--;
1226 list_del_init(&ap_msg->list);
1228 spin_unlock_bh(&aq->lock);
1230 EXPORT_SYMBOL(ap_cancel_message);
1233 * __ap_flush_queue(): Flush requests.
1234 * @aq: Pointer to the AP queue
1236 * Flush all requests from the request/pending queue of an AP device.
1238 static void __ap_flush_queue(struct ap_queue *aq)
1240 struct ap_message *ap_msg, *next;
1242 list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
1243 list_del_init(&ap_msg->list);
1244 aq->pendingq_count--;
1245 ap_msg->rc = -EAGAIN;
1246 ap_msg->receive(aq, ap_msg, NULL);
1248 list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
1249 list_del_init(&ap_msg->list);
1250 aq->requestq_count--;
1251 ap_msg->rc = -EAGAIN;
1252 ap_msg->receive(aq, ap_msg, NULL);
1254 aq->queue_count = 0;
1257 void ap_flush_queue(struct ap_queue *aq)
1259 spin_lock_bh(&aq->lock);
1260 __ap_flush_queue(aq);
1261 spin_unlock_bh(&aq->lock);
1263 EXPORT_SYMBOL(ap_flush_queue);
1265 void ap_queue_prepare_remove(struct ap_queue *aq)
1267 spin_lock_bh(&aq->lock);
1269 __ap_flush_queue(aq);
1270 /* move queue device state to SHUTDOWN in progress */
1271 aq->dev_state = AP_DEV_STATE_SHUTDOWN;
1272 spin_unlock_bh(&aq->lock);
1273 del_timer_sync(&aq->timeout);
1276 void ap_queue_remove(struct ap_queue *aq)
1279 * all messages have been flushed and the device state
1280 * is SHUTDOWN. Now reset with zero which also clears
1281 * the irq registration and move the device state
1282 * to the initial value AP_DEV_STATE_UNINITIATED.
1284 spin_lock_bh(&aq->lock);
1285 ap_zapq(aq->qid, 0);
1286 aq->dev_state = AP_DEV_STATE_UNINITIATED;
1287 spin_unlock_bh(&aq->lock);
1290 void _ap_queue_init_state(struct ap_queue *aq)
1292 aq->dev_state = AP_DEV_STATE_OPERATING;
1293 aq->sm_state = AP_SM_STATE_RESET_START;
1294 aq->last_err_rc = 0;
1295 aq->assoc_idx = ASSOC_IDX_INVALID;
1296 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
1299 void ap_queue_init_state(struct ap_queue *aq)
1301 spin_lock_bh(&aq->lock);
1302 _ap_queue_init_state(aq);
1303 spin_unlock_bh(&aq->lock);
1305 EXPORT_SYMBOL(ap_queue_init_state);