1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *  Copyright IBM Corp. 2001, 2018
4  *  Author(s): Robert Burroughs
5  *             Eric Rossman (edrossma@us.ibm.com)
6  *             Cornelia Huck <cornelia.huck@de.ibm.com>
7  *
8  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10  *                                Ralph Wuerthner <rwuerthn@de.ibm.com>
11  *  MSGTYPE restruct:             Holger Dengler <hd@linux.vnet.ibm.com>
12  *  Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
13  */
14
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/miscdevice.h>
19 #include <linux/fs.h>
20 #include <linux/compat.h>
21 #include <linux/slab.h>
22 #include <linux/atomic.h>
23 #include <linux/uaccess.h>
24 #include <linux/hw_random.h>
25 #include <linux/debugfs.h>
26 #include <linux/cdev.h>
27 #include <linux/ctype.h>
28 #include <linux/capability.h>
29 #include <asm/debug.h>
30
31 #define CREATE_TRACE_POINTS
32 #include <asm/trace/zcrypt.h>
33
34 #include "zcrypt_api.h"
35 #include "zcrypt_debug.h"
36
37 #include "zcrypt_msgtype6.h"
38 #include "zcrypt_msgtype50.h"
39 #include "zcrypt_ccamisc.h"
40 #include "zcrypt_ep11misc.h"
41
42 /*
43  * Module description.
44  */
45 MODULE_AUTHOR("IBM Corporation");
46 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
47                    "Copyright IBM Corp. 2001, 2012");
48 MODULE_LICENSE("GPL");
49
50 /*
51  * zcrypt tracepoint functions
52  */
53 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
54 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
55
56 static int zcrypt_hwrng_seed = 1;
57 module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
58 MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
59
60 DEFINE_SPINLOCK(zcrypt_list_lock);
61 LIST_HEAD(zcrypt_card_list);
62 int zcrypt_device_count;
63
64 static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
65 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
66
67 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
68 EXPORT_SYMBOL(zcrypt_rescan_req);
69
70 static LIST_HEAD(zcrypt_ops_list);
71
72 /* Zcrypt related debug feature stuff. */
73 debug_info_t *zcrypt_dbf_info;
74
75 /**
76  * zcrypt_process_rescan(): Process a rescan of the transport layer.
77  *
78  * Returns 1 if the rescan has been processed, otherwise 0.
79  */
80 static inline int zcrypt_process_rescan(void)
81 {
82         if (atomic_read(&zcrypt_rescan_req)) {
83                 atomic_set(&zcrypt_rescan_req, 0);
84                 atomic_inc(&zcrypt_rescan_count);
85                 ap_bus_force_rescan();
86                 ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
87                            atomic_inc_return(&zcrypt_rescan_count));
88                 return 1;
89         }
90         return 0;
91 }
92
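/*
 * Register/unregister a msgtype ops vector. Registered ops are kept
 * on the internal zcrypt_ops_list and can be looked up by name and
 * variant via zcrypt_msgtype().
 */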
93 void zcrypt_msgtype_register(struct zcrypt_ops *zops)
94 {
95         list_add_tail(&zops->list, &zcrypt_ops_list);
96 }
97
98 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
99 {
100         list_del_init(&zops->list);
101 }
102
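/*
 * Look up a registered msgtype ops vector by name and variant.
 * Returns the ops pointer or NULL if there is no match.
 */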
103 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
104 {
105         struct zcrypt_ops *zops;
106
107         list_for_each_entry(zops, &zcrypt_ops_list, list)
108                 if ((zops->variant == variant) &&
109                     (!strncmp(zops->name, name, sizeof(zops->name))))
110                         return zops;
111         return NULL;
112 }
113 EXPORT_SYMBOL(zcrypt_msgtype);
114
115 /*
116  * Multi device nodes extension functions.
117  */
118
119 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
120
121 struct zcdn_device;
122
123 static struct class *zcrypt_class;
124 static dev_t zcrypt_devt;
125 static struct cdev zcrypt_cdev;
126
127 struct zcdn_device {
128         struct device device;
129         struct ap_perms perms;
130 };
131
132 #define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)
133
134 #define ZCDN_MAX_NAME 32
135
136 static int zcdn_create(const char *name);
137 static int zcdn_destroy(const char *name);
138
139 /*
140  * Find zcdn device by name.
141  * Returns reference to the zcdn device which needs to be released
142  * with put_device() after use.
143  */
144 static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
145 {
146         struct device *dev = class_find_device_by_name(zcrypt_class, name);
147
148         return dev ? to_zcdn_dev(dev) : NULL;
149 }
150
151 /*
152  * Find zcdn device by devt value.
153  * Returns reference to the zcdn device which needs to be released
154  * with put_device() after use.
155  */
156 static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
157 {
158         struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
159
160         return dev ? to_zcdn_dev(dev) : NULL;
161 }
162
163 static ssize_t ioctlmask_show(struct device *dev,
164                               struct device_attribute *attr,
165                               char *buf)
166 {
167         int i, rc;
168         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
169
170         if (mutex_lock_interruptible(&ap_perms_mutex))
171                 return -ERESTARTSYS;
172
173         buf[0] = '0';
174         buf[1] = 'x';
175         for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
176                 snprintf(buf + 2 + 2 * i * sizeof(long),
177                          PAGE_SIZE - 2 - 2 * i * sizeof(long),
178                          "%016lx", zcdndev->perms.ioctlm[i]);
179         buf[2 + 2 * i * sizeof(long)] = '\n';
180         buf[2 + 2 * i * sizeof(long) + 1] = '\0';
181         rc = 2 + 2 * i * sizeof(long) + 1;
182
183         mutex_unlock(&ap_perms_mutex);
184
185         return rc;
186 }
187
188 static ssize_t ioctlmask_store(struct device *dev,
189                                struct device_attribute *attr,
190                                const char *buf, size_t count)
191 {
192         int rc;
193         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
194
195         rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
196                                AP_IOCTLS, &ap_perms_mutex);
197         if (rc)
198                 return rc;
199
200         return count;
201 }
202
203 static DEVICE_ATTR_RW(ioctlmask);
204
205 static ssize_t apmask_show(struct device *dev,
206                            struct device_attribute *attr,
207                            char *buf)
208 {
209         int i, rc;
210         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
211
212         if (mutex_lock_interruptible(&ap_perms_mutex))
213                 return -ERESTARTSYS;
214
215         buf[0] = '0';
216         buf[1] = 'x';
217         for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
218                 snprintf(buf + 2 + 2 * i * sizeof(long),
219                          PAGE_SIZE - 2 - 2 * i * sizeof(long),
220                          "%016lx", zcdndev->perms.apm[i]);
221         buf[2 + 2 * i * sizeof(long)] = '\n';
222         buf[2 + 2 * i * sizeof(long) + 1] = '\0';
223         rc = 2 + 2 * i * sizeof(long) + 1;
224
225         mutex_unlock(&ap_perms_mutex);
226
227         return rc;
228 }
229
230 static ssize_t apmask_store(struct device *dev,
231                             struct device_attribute *attr,
232                             const char *buf, size_t count)
233 {
234         int rc;
235         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
236
237         rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
238                                AP_DEVICES, &ap_perms_mutex);
239         if (rc)
240                 return rc;
241
242         return count;
243 }
244
245 static DEVICE_ATTR_RW(apmask);
246
247 static ssize_t aqmask_show(struct device *dev,
248                            struct device_attribute *attr,
249                            char *buf)
250 {
251         int i, rc;
252         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
253
254         if (mutex_lock_interruptible(&ap_perms_mutex))
255                 return -ERESTARTSYS;
256
257         buf[0] = '0';
258         buf[1] = 'x';
259         for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
260                 snprintf(buf + 2 + 2 * i * sizeof(long),
261                          PAGE_SIZE - 2 - 2 * i * sizeof(long),
262                          "%016lx", zcdndev->perms.aqm[i]);
263         buf[2 + 2 * i * sizeof(long)] = '\n';
264         buf[2 + 2 * i * sizeof(long) + 1] = '\0';
265         rc = 2 + 2 * i * sizeof(long) + 1;
266
267         mutex_unlock(&ap_perms_mutex);
268
269         return rc;
270 }
271
272 static ssize_t aqmask_store(struct device *dev,
273                             struct device_attribute *attr,
274                             const char *buf, size_t count)
275 {
276         int rc;
277         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
278
279         rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
280                                AP_DOMAINS, &ap_perms_mutex);
281         if (rc)
282                 return rc;
283
284         return count;
285 }
286
287 static DEVICE_ATTR_RW(aqmask);
288
289 static struct attribute *zcdn_dev_attrs[] = {
290         &dev_attr_ioctlmask.attr,
291         &dev_attr_apmask.attr,
292         &dev_attr_aqmask.attr,
293         NULL
294 };
295
296 static struct attribute_group zcdn_dev_attr_group = {
297         .attrs = zcdn_dev_attrs
298 };
299
300 static const struct attribute_group *zcdn_dev_attr_groups[] = {
301         &zcdn_dev_attr_group,
302         NULL
303 };
304
305 static ssize_t zcdn_create_store(struct class *class,
306                                  struct class_attribute *attr,
307                                  const char *buf, size_t count)
308 {
309         int rc;
310         char name[ZCDN_MAX_NAME];
311
312         strncpy(name, skip_spaces(buf), sizeof(name));
313         name[sizeof(name) - 1] = '\0';
314
315         rc = zcdn_create(strim(name));
316
317         return rc ? rc : count;
318 }
319
320 static const struct class_attribute class_attr_zcdn_create =
321         __ATTR(create, 0600, NULL, zcdn_create_store);
322
323 static ssize_t zcdn_destroy_store(struct class *class,
324                                   struct class_attribute *attr,
325                                   const char *buf, size_t count)
326 {
327         int rc;
328         char name[ZCDN_MAX_NAME];
329
330         strncpy(name, skip_spaces(buf), sizeof(name));
331         name[sizeof(name) - 1] = '\0';
332
333         rc = zcdn_destroy(strim(name));
334
335         return rc ? rc : count;
336 }
337
338 static const struct class_attribute class_attr_zcdn_destroy =
339         __ATTR(destroy, 0600, NULL, zcdn_destroy_store);
340
341 static void zcdn_device_release(struct device *dev)
342 {
343         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
344
345         ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n",
346                    MAJOR(dev->devt), MINOR(dev->devt));
347
348         kfree(zcdndev);
349 }
350
351 static int zcdn_create(const char *name)
352 {
353         dev_t devt;
354         int i, rc = 0;
355         char nodename[ZCDN_MAX_NAME];
356         struct zcdn_device *zcdndev;
357
358         if (mutex_lock_interruptible(&ap_perms_mutex))
359                 return -ERESTARTSYS;
360
361         /* check if device node with this name already exists */
362         if (name[0]) {
363                 zcdndev = find_zcdndev_by_name(name);
364                 if (zcdndev) {
365                         put_device(&zcdndev->device);
366                         rc = -EEXIST;
367                         goto unlockout;
368                 }
369         }
370
371         /* find an unused minor number */
372         for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
373                 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
374                 zcdndev = find_zcdndev_by_devt(devt);
375                 if (zcdndev)
376                         put_device(&zcdndev->device);
377                 else
378                         break;
379         }
380         if (i == ZCRYPT_MAX_MINOR_NODES) {
381                 rc = -ENOSPC;
382                 goto unlockout;
383         }
384
385         /* alloc and prepare a new zcdn device */
386         zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
387         if (!zcdndev) {
388                 rc = -ENOMEM;
389                 goto unlockout;
390         }
391         zcdndev->device.release = zcdn_device_release;
392         zcdndev->device.class = zcrypt_class;
393         zcdndev->device.devt = devt;
394         zcdndev->device.groups = zcdn_dev_attr_groups;
395         if (name[0])
396                 strncpy(nodename, name, sizeof(nodename));
397         else
398                 snprintf(nodename, sizeof(nodename),
399                          ZCRYPT_NAME "_%d", (int) MINOR(devt));
400         nodename[sizeof(nodename)-1] = '\0';
401         if (dev_set_name(&zcdndev->device, nodename)) {
402                 kfree(zcdndev);
403                 rc = -EINVAL;
404                 goto unlockout;
405         }
406         rc = device_register(&zcdndev->device);
407         if (rc) {
408                 put_device(&zcdndev->device);
409                 goto unlockout;
410         }
411
412         ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n",
413                    MAJOR(devt), MINOR(devt));
414
415 unlockout:
416         mutex_unlock(&ap_perms_mutex);
417         return rc;
418 }
419
420 static int zcdn_destroy(const char *name)
421 {
422         int rc = 0;
423         struct zcdn_device *zcdndev;
424
425         if (mutex_lock_interruptible(&ap_perms_mutex))
426                 return -ERESTARTSYS;
427
428         /* try to find this zcdn device */
429         zcdndev = find_zcdndev_by_name(name);
430         if (!zcdndev) {
431                 rc = -ENOENT;
432                 goto unlockout;
433         }
434
435         /*
436          * The zcdn device is not hard destroyed. It is subject to
437          * reference counting and thus just needs to be unregistered.
438          */
439         put_device(&zcdndev->device);
440         device_unregister(&zcdndev->device);
441
442 unlockout:
443         mutex_unlock(&ap_perms_mutex);
444         return rc;
445 }
446
447 static void zcdn_destroy_all(void)
448 {
449         int i;
450         dev_t devt;
451         struct zcdn_device *zcdndev;
452
453         mutex_lock(&ap_perms_mutex);
454         for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
455                 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
456                 zcdndev = find_zcdndev_by_devt(devt);
457                 if (zcdndev) {
458                         put_device(&zcdndev->device);
459                         device_unregister(&zcdndev->device);
460                 }
461         }
462         mutex_unlock(&ap_perms_mutex);
463 }
464
465 #endif
466
467 /**
468  * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
469  *
470  * This function is not supported beyond zcrypt 1.3.1.
471  */
472 static ssize_t zcrypt_read(struct file *filp, char __user *buf,
473                            size_t count, loff_t *f_pos)
474 {
475         return -EPERM;
476 }
477
478 /**
479  * zcrypt_write(): Not allowed.
480  *
481  * Write is is not allowed
482  */
483 static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
484                             size_t count, loff_t *f_pos)
485 {
486         return -EPERM;
487 }
488
489 /**
490  * zcrypt_open(): Count number of users.
491  *
492  * Device open function to count number of users.
493  */
494 static int zcrypt_open(struct inode *inode, struct file *filp)
495 {
496         struct ap_perms *perms = &ap_perms;
497
498 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
499         if (filp->f_inode->i_cdev == &zcrypt_cdev) {
500                 struct zcdn_device *zcdndev;
501
502                 if (mutex_lock_interruptible(&ap_perms_mutex))
503                         return -ERESTARTSYS;
504                 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
505                 /* find returns a reference, no get_device() needed */
506                 mutex_unlock(&ap_perms_mutex);
507                 if (zcdndev)
508                         perms = &zcdndev->perms;
509         }
510 #endif
511         filp->private_data = (void *) perms;
512
513         atomic_inc(&zcrypt_open_count);
514         return stream_open(inode, filp);
515 }
516
517 /**
518  * zcrypt_release(): Count number of users.
519  *
520  * Device close function to count number of users.
521  */
522 static int zcrypt_release(struct inode *inode, struct file *filp)
523 {
524 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
525         if (filp->f_inode->i_cdev == &zcrypt_cdev) {
526                 struct zcdn_device *zcdndev;
527
528                 mutex_lock(&ap_perms_mutex);
529                 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
530                 mutex_unlock(&ap_perms_mutex);
531                 if (zcdndev) {
532                         /* 2 puts here: one for find, one for open */
533                         put_device(&zcdndev->device);
534                         put_device(&zcdndev->device);
535                 }
536         }
537 #endif
538
539         atomic_dec(&zcrypt_open_count);
540         return 0;
541 }
542
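/*
 * Check if the ioctl nr is allowed by the given device node
 * permissions. Returns 0 if the ioctl nr is in range and the
 * corresponding bit is set in the ioctl mask, otherwise -EPERM
 * (with a warning logged to the zcrypt debug feature).
 */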
543 static inline int zcrypt_check_ioctl(struct ap_perms *perms,
544                                      unsigned int cmd)
545 {
546         int rc = -EPERM;
547         int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
548
549         if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
550                 if (test_bit_inv(ioctlnr, perms->ioctlm))
551                         rc = 0;
552         }
553
554         if (rc)
555                 ZCRYPT_DBF(DBF_WARN,
556                            "ioctl check failed: ioctlnr=0x%04x rc=%d\n",
557                            ioctlnr, rc);
558
559         return rc;
560 }
561
562 static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
563 {
564         return test_bit_inv(card, perms->apm) ? true : false;
565 }
566
567 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
568 {
569         return test_bit_inv(queue, perms->aqm) ? true : false;
570 }
571
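/*
 * Pin the chosen card/queue pair: take references on the card, the
 * queue, the underlying AP device and the owning module, and account
 * the request weight to the card and queue load counters. Returns the
 * queue pointer, or NULL if no queue was chosen or the module could
 * not be pinned. zcrypt_drop_queue() below undoes all of this.
 */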
572 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
573                                                      struct zcrypt_queue *zq,
574                                                      struct module **pmod,
575                                                      unsigned int weight)
576 {
577         if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
578                 return NULL;
579         zcrypt_card_get(zc);
580         zcrypt_queue_get(zq);
581         get_device(&zq->queue->ap_dev.device);
582         atomic_add(weight, &zc->load);
583         atomic_add(weight, &zq->load);
584         zq->request_count++;
585         *pmod = zq->queue->ap_dev.drv->driver.owner;
586         return zq;
587 }
588
589 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
590                                      struct zcrypt_queue *zq,
591                                      struct module *mod,
592                                      unsigned int weight)
593 {
594         zq->request_count--;
595         atomic_sub(weight, &zc->load);
596         atomic_sub(weight, &zq->load);
597         put_device(&zq->queue->ap_dev.device);
598         zcrypt_queue_put(zq);
599         zcrypt_card_put(zc);
600         module_put(mod);
601 }
602
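/*
 * Compare the weight adjusted load of a card against the currently
 * preferred card. Returns true if the card should become the new
 * preferred card; ties are broken by the lower total request count.
 * zcrypt_queue_compare() below does the same on queue level.
 */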
603 static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
604                                        struct zcrypt_card *pref_zc,
605                                        unsigned int weight,
606                                        unsigned int pref_weight)
607 {
608         if (!pref_zc)
609                 return true;
610         weight += atomic_read(&zc->load);
611         pref_weight += atomic_read(&pref_zc->load);
612         if (weight == pref_weight)
613                 return atomic64_read(&zc->card->total_request_count) <
614                         atomic64_read(&pref_zc->card->total_request_count);
615         return weight < pref_weight;
616 }
617
618 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
619                                         struct zcrypt_queue *pref_zq,
620                                         unsigned int weight,
621                                         unsigned int pref_weight)
622 {
623         if (!pref_zq)
624                 return true;
625         weight += atomic_read(&zq->load);
626         pref_weight += atomic_read(&pref_zq->load);
627         if (weight == pref_weight)
628                 return zq->queue->total_request_count <
629                         pref_zq->queue->total_request_count;
630         return weight < pref_weight;
631 }
632
633 /*
634  * zcrypt ioctls.
635  */
636 static long zcrypt_rsa_modexpo(struct ap_perms *perms,
637                                struct zcrypt_track *tr,
638                                struct ica_rsa_modexpo *mex)
639 {
640         struct zcrypt_card *zc, *pref_zc;
641         struct zcrypt_queue *zq, *pref_zq;
642         struct ap_message ap_msg;
643         unsigned int wgt = 0, pref_wgt = 0;
644         unsigned int func_code;
645         int cpen, qpen, qid = 0, rc = -ENODEV;
646         struct module *mod;
647
648         trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
649
650         ap_init_message(&ap_msg);
651
652 #ifdef CONFIG_ZCRYPT_DEBUG
653         if (tr && tr->fi.cmd)
654                 ap_msg.fi.cmd = tr->fi.cmd;
655 #endif
656
657         if (mex->outputdatalength < mex->inputdatalength) {
658                 func_code = 0;
659                 rc = -EINVAL;
660                 goto out;
661         }
662
663         /*
664          * As long as outputdatalength is big enough, we can set the
665          * outputdatalength equal to the inputdatalength, since that is the
666          * number of bytes we will copy in any case
667          */
668         mex->outputdatalength = mex->inputdatalength;
669
670         rc = get_rsa_modex_fc(mex, &func_code);
671         if (rc)
672                 goto out;
673
674         pref_zc = NULL;
675         pref_zq = NULL;
676         spin_lock(&zcrypt_list_lock);
677         for_each_zcrypt_card(zc) {
678                 /* Check for useable accelerator or CCA card */
679                 if (!zc->online || !zc->card->config ||
680                     !(zc->card->functions & 0x18000000))
681                         continue;
682                 /* Check for size limits */
683                 if (zc->min_mod_size > mex->inputdatalength ||
684                     zc->max_mod_size < mex->inputdatalength)
685                         continue;
686                 /* check if device node has admission for this card */
687                 if (!zcrypt_check_card(perms, zc->card->id))
688                         continue;
689                 /* get weight index of the card device  */
690                 wgt = zc->speed_rating[func_code];
691                 /* penalty if this msg was previously sent via this card */
692                 cpen = (tr && tr->again_counter && tr->last_qid &&
693                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
694                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
695                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
696                         continue;
697                 for_each_zcrypt_queue(zq, zc) {
698                         /* check if device is useable and eligible */
699                         if (!zq->online || !zq->ops->rsa_modexpo ||
700                             !zq->queue->config)
701                                 continue;
702                         /* check if device node has admission for this queue */
703                         if (!zcrypt_check_queue(perms,
704                                                 AP_QID_QUEUE(zq->queue->qid)))
705                                 continue;
706                         /* penalty if the msg was previously sent at this qid */
707                         qpen = (tr && tr->again_counter && tr->last_qid &&
708                                 tr->last_qid == zq->queue->qid) ?
709                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
710                         if (!zcrypt_queue_compare(zq, pref_zq,
711                                                   wgt + cpen + qpen, pref_wgt))
712                                 continue;
713                         pref_zc = zc;
714                         pref_zq = zq;
715                         pref_wgt = wgt + cpen + qpen;
716                 }
717         }
718         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
719         spin_unlock(&zcrypt_list_lock);
720
721         if (!pref_zq) {
722                 rc = -ENODEV;
723                 goto out;
724         }
725
726         qid = pref_zq->queue->qid;
727         rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
728
729         spin_lock(&zcrypt_list_lock);
730         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
731         spin_unlock(&zcrypt_list_lock);
732
733 out:
734         ap_release_message(&ap_msg);
735         if (tr) {
736                 tr->last_rc = rc;
737                 tr->last_qid = qid;
738         }
739         trace_s390_zcrypt_rep(mex, func_code, rc,
740                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
741         return rc;
742 }
743
744 static long zcrypt_rsa_crt(struct ap_perms *perms,
745                            struct zcrypt_track *tr,
746                            struct ica_rsa_modexpo_crt *crt)
747 {
748         struct zcrypt_card *zc, *pref_zc;
749         struct zcrypt_queue *zq, *pref_zq;
750         struct ap_message ap_msg;
751         unsigned int wgt = 0, pref_wgt = 0;
752         unsigned int func_code;
753         int cpen, qpen, qid = 0, rc = -ENODEV;
754         struct module *mod;
755
756         trace_s390_zcrypt_req(crt, TP_ICARSACRT);
757
758         ap_init_message(&ap_msg);
759
760 #ifdef CONFIG_ZCRYPT_DEBUG
761         if (tr && tr->fi.cmd)
762                 ap_msg.fi.cmd = tr->fi.cmd;
763 #endif
764
765         if (crt->outputdatalength < crt->inputdatalength) {
766                 func_code = 0;
767                 rc = -EINVAL;
768                 goto out;
769         }
770
771         /*
772          * As long as outputdatalength is big enough, we can set the
773          * outputdatalength equal to the inputdatalength, since that is the
774          * number of bytes we will copy in any case
775          */
776         crt->outputdatalength = crt->inputdatalength;
777
778         rc = get_rsa_crt_fc(crt, &func_code);
779         if (rc)
780                 goto out;
781
782         pref_zc = NULL;
783         pref_zq = NULL;
784         spin_lock(&zcrypt_list_lock);
785         for_each_zcrypt_card(zc) {
786                 /* Check for useable accelerator or CCA card */
787                 if (!zc->online || !zc->card->config ||
788                     !(zc->card->functions & 0x18000000))
789                         continue;
790                 /* Check for size limits */
791                 if (zc->min_mod_size > crt->inputdatalength ||
792                     zc->max_mod_size < crt->inputdatalength)
793                         continue;
794                 /* check if device node has admission for this card */
795                 if (!zcrypt_check_card(perms, zc->card->id))
796                         continue;
797                 /* get weight index of the card device  */
798                 wgt = zc->speed_rating[func_code];
799                 /* penalty if this msg was previously sent via this card */
800                 cpen = (tr && tr->again_counter && tr->last_qid &&
801                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
802                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
803                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
804                         continue;
805                 for_each_zcrypt_queue(zq, zc) {
806                         /* check if device is useable and eligible */
807                         if (!zq->online || !zq->ops->rsa_modexpo_crt ||
808                             !zq->queue->config)
809                                 continue;
810                         /* check if device node has admission for this queue */
811                         if (!zcrypt_check_queue(perms,
812                                                 AP_QID_QUEUE(zq->queue->qid)))
813                                 continue;
814                         /* penalty if the msg was previously sent at this qid */
815                         qpen = (tr && tr->again_counter && tr->last_qid &&
816                                 tr->last_qid == zq->queue->qid) ?
817                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
818                         if (!zcrypt_queue_compare(zq, pref_zq,
819                                                   wgt + cpen + qpen, pref_wgt))
820                                 continue;
821                         pref_zc = zc;
822                         pref_zq = zq;
823                         pref_wgt = wgt + cpen + qpen;
824                 }
825         }
826         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
827         spin_unlock(&zcrypt_list_lock);
828
829         if (!pref_zq) {
830                 rc = -ENODEV;
831                 goto out;
832         }
833
834         qid = pref_zq->queue->qid;
835         rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
836
837         spin_lock(&zcrypt_list_lock);
838         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
839         spin_unlock(&zcrypt_list_lock);
840
841 out:
842         ap_release_message(&ap_msg);
843         if (tr) {
844                 tr->last_rc = rc;
845                 tr->last_qid = qid;
846         }
847         trace_s390_zcrypt_rep(crt, func_code, rc,
848                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
849         return rc;
850 }
851
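/*
 * Select the best suitable online CCA card/queue for this CPRB
 * (honoring the device node permissions, a user selected card id and
 * the target domain) and forward the request via the queue's
 * send_cprb msgtype op.
 */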
852 static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
853                               struct zcrypt_track *tr,
854                               struct ica_xcRB *xcRB)
855 {
856         struct zcrypt_card *zc, *pref_zc;
857         struct zcrypt_queue *zq, *pref_zq;
858         struct ap_message ap_msg;
859         unsigned int wgt = 0, pref_wgt = 0;
860         unsigned int func_code;
861         unsigned short *domain, tdom;
862         int cpen, qpen, qid = 0, rc = -ENODEV;
863         struct module *mod;
864
865         trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
866
867         xcRB->status = 0;
868         ap_init_message(&ap_msg);
869
870 #ifdef CONFIG_ZCRYPT_DEBUG
871         if (tr && tr->fi.cmd)
872                 ap_msg.fi.cmd = tr->fi.cmd;
873         if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
874                 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
875                                 __func__, tr->fi.cmd);
876                 xcRB->agent_ID = 0x4646;
877         }
878 #endif
879
880         rc = get_cprb_fc(userspace, xcRB, &ap_msg, &func_code, &domain);
881         if (rc)
882                 goto out;
883
884         /*
885          * If a valid target domain is set and this domain is NOT a usage
886          * domain but a control only domain, use the default domain as target.
887          */
888         tdom = *domain;
889         if (tdom < AP_DOMAINS &&
890             !ap_test_config_usage_domain(tdom) &&
891             ap_test_config_ctrl_domain(tdom) &&
892             ap_domain_index >= 0)
893                 tdom = ap_domain_index;
894
895         pref_zc = NULL;
896         pref_zq = NULL;
897         spin_lock(&zcrypt_list_lock);
898         for_each_zcrypt_card(zc) {
899                 /* Check for useable CCA card */
900                 if (!zc->online || !zc->card->config ||
901                     !(zc->card->functions & 0x10000000))
902                         continue;
903                 /* Check for user selected CCA card */
904                 if (xcRB->user_defined != AUTOSELECT &&
905                     xcRB->user_defined != zc->card->id)
906                         continue;
907                 /* check if device node has admission for this card */
908                 if (!zcrypt_check_card(perms, zc->card->id))
909                         continue;
910                 /* get weight index of the card device  */
911                 wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
912                 /* penalty if this msg was previously sent via this card */
913                 cpen = (tr && tr->again_counter && tr->last_qid &&
914                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
915                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
916                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
917                         continue;
918                 for_each_zcrypt_queue(zq, zc) {
919                         /* check if device is useable and eligible */
920                         if (!zq->online ||
921                             !zq->ops->send_cprb ||
922                             !zq->queue->config ||
923                             (tdom != AUTOSEL_DOM &&
924                              tdom != AP_QID_QUEUE(zq->queue->qid)))
925                                 continue;
926                         /* check if device node has admission for this queue */
927                         if (!zcrypt_check_queue(perms,
928                                                 AP_QID_QUEUE(zq->queue->qid)))
929                                 continue;
930                         /* penalty if the msg was previously sent at this qid */
931                         qpen = (tr && tr->again_counter && tr->last_qid &&
932                                 tr->last_qid == zq->queue->qid) ?
933                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
934                         if (!zcrypt_queue_compare(zq, pref_zq,
935                                                   wgt + cpen + qpen, pref_wgt))
936                                 continue;
937                         pref_zc = zc;
938                         pref_zq = zq;
939                         pref_wgt = wgt + cpen + qpen;
940                 }
941         }
942         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
943         spin_unlock(&zcrypt_list_lock);
944
945         if (!pref_zq) {
946                 rc = -ENODEV;
947                 goto out;
948         }
949
950         /* in case of auto select, provide the correct domain */
951         qid = pref_zq->queue->qid;
952         if (*domain == AUTOSEL_DOM)
953                 *domain = AP_QID_QUEUE(qid);
954
955 #ifdef CONFIG_ZCRYPT_DEBUG
956         if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) {
957                 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n",
958                                 __func__, tr->fi.cmd);
959                 *domain = 99;
960         }
961 #endif
962
963         rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg);
964
965         spin_lock(&zcrypt_list_lock);
966         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
967         spin_unlock(&zcrypt_list_lock);
968
969 out:
970         ap_release_message(&ap_msg);
971         if (tr) {
972                 tr->last_rc = rc;
973                 tr->last_qid = qid;
974         }
975         trace_s390_zcrypt_rep(xcRB, func_code, rc,
976                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
977         return rc;
978 }
979
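/*
 * zcrypt_send_cprb(): in-kernel entry point for sending a CCA CPRB.
 * Same underlying path as the ZSECSENDCPRB ioctl, but with kernel
 * space buffers (userspace == false) and the default ap_perms set.
 *
 * Minimal usage sketch, assuming the caller has already prepared the
 * request and reply buffers referenced by the ica_xcRB (not shown in
 * this file):
 *
 *	struct ica_xcRB xcrb = { ... };
 *	long rc = zcrypt_send_cprb(&xcrb);
 *
 * rc is 0 on success, otherwise a negative errno (e.g. -ENODEV if no
 * suitable card/queue is available).
 */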
980 long zcrypt_send_cprb(struct ica_xcRB *xcRB)
981 {
982         return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB);
983 }
984 EXPORT_SYMBOL(zcrypt_send_cprb);
985
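/*
 * Check whether the given AP card id matches one of the EP11 target
 * devices requested by userspace (AUTOSEL_AP acts as a wildcard).
 * is_desired_ep11_queue() below does the same check on qid level.
 */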
986 static bool is_desired_ep11_card(unsigned int dev_id,
987                                  unsigned short target_num,
988                                  struct ep11_target_dev *targets)
989 {
990         while (target_num-- > 0) {
991                 if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
992                         return true;
993                 targets++;
994         }
995         return false;
996 }
997
998 static bool is_desired_ep11_queue(unsigned int dev_qid,
999                                   unsigned short target_num,
1000                                   struct ep11_target_dev *targets)
1001 {
1002         int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
1003
1004         while (target_num-- > 0) {
1005                 if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
1006                     (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
1007                         return true;
1008                 targets++;
1009         }
1010         return false;
1011 }
1012
1013 static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
1014                                    struct zcrypt_track *tr,
1015                                    struct ep11_urb *xcrb)
1016 {
1017         struct zcrypt_card *zc, *pref_zc;
1018         struct zcrypt_queue *zq, *pref_zq;
1019         struct ep11_target_dev *targets;
1020         unsigned short target_num;
1021         unsigned int wgt = 0, pref_wgt = 0;
1022         unsigned int func_code;
1023         struct ap_message ap_msg;
1024         int cpen, qpen, qid = 0, rc = -ENODEV;
1025         struct module *mod;
1026
1027         trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
1028
1029         ap_init_message(&ap_msg);
1030
1031 #ifdef CONFIG_ZCRYPT_DEBUG
1032         if (tr && tr->fi.cmd)
1033                 ap_msg.fi.cmd = tr->fi.cmd;
1034 #endif
1035
1036         target_num = (unsigned short) xcrb->targets_num;
1037
1038         /* empty list indicates autoselect (all available targets) */
1039         targets = NULL;
1040         if (target_num != 0) {
1041                 struct ep11_target_dev __user *uptr;
1042
1043                 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
1044                 if (!targets) {
1045                         func_code = 0;
1046                         rc = -ENOMEM;
1047                         goto out;
1048                 }
1049
1050                 uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
1051                 if (z_copy_from_user(userspace, targets, uptr,
1052                                    target_num * sizeof(*targets))) {
1053                         func_code = 0;
1054                         rc = -EFAULT;
1055                         goto out_free;
1056                 }
1057         }
1058
1059         rc = get_ep11cprb_fc(userspace, xcrb, &ap_msg, &func_code);
1060         if (rc)
1061                 goto out_free;
1062
1063         pref_zc = NULL;
1064         pref_zq = NULL;
1065         spin_lock(&zcrypt_list_lock);
1066         for_each_zcrypt_card(zc) {
1067                 /* Check for useable EP11 card */
1068                 if (!zc->online || !zc->card->config ||
1069                     !(zc->card->functions & 0x04000000))
1070                         continue;
1071                 /* Check for user selected EP11 card */
1072                 if (targets &&
1073                     !is_desired_ep11_card(zc->card->id, target_num, targets))
1074                         continue;
1075                 /* check if device node has admission for this card */
1076                 if (!zcrypt_check_card(perms, zc->card->id))
1077                         continue;
1078                 /* get weight index of the card device  */
1079                 wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
1080                 /* penalty if this msg was previously sent via this card */
1081                 cpen = (tr && tr->again_counter && tr->last_qid &&
1082                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
1083                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
1084                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
1085                         continue;
1086                 for_each_zcrypt_queue(zq, zc) {
1087                         /* check if device is useable and eligible */
1088                         if (!zq->online ||
1089                             !zq->ops->send_ep11_cprb ||
1090                             !zq->queue->config ||
1091                             (targets &&
1092                              !is_desired_ep11_queue(zq->queue->qid,
1093                                                     target_num, targets)))
1094                                 continue;
1095                         /* check if device node has admission for this queue */
1096                         if (!zcrypt_check_queue(perms,
1097                                                 AP_QID_QUEUE(zq->queue->qid)))
1098                                 continue;
1099                         /* penalty if the msg was previously sent at this qid */
1100                         qpen = (tr && tr->again_counter && tr->last_qid &&
1101                                 tr->last_qid == zq->queue->qid) ?
1102                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
1103                         if (!zcrypt_queue_compare(zq, pref_zq,
1104                                                   wgt + cpen + qpen, pref_wgt))
1105                                 continue;
1106                         pref_zc = zc;
1107                         pref_zq = zq;
1108                         pref_wgt = wgt + cpen + qpen;
1109                 }
1110         }
1111         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1112         spin_unlock(&zcrypt_list_lock);
1113
1114         if (!pref_zq) {
1115                 rc = -ENODEV;
1116                 goto out_free;
1117         }
1118
1119         qid = pref_zq->queue->qid;
1120         rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
1121
1122         spin_lock(&zcrypt_list_lock);
1123         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1124         spin_unlock(&zcrypt_list_lock);
1125
1126 out_free:
1127         kfree(targets);
1128 out:
1129         ap_release_message(&ap_msg);
1130         if (tr) {
1131                 tr->last_rc = rc;
1132                 tr->last_qid = qid;
1133         }
1134         trace_s390_zcrypt_rep(xcrb, func_code, rc,
1135                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1136         return rc;
1137 }
1138
1139 long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
1140 {
1141         return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
1142 }
1143 EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
1144
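/*
 * Pick the least (weight adjusted) loaded usable CCA card/queue that
 * offers an rng op and fetch random bytes from it into the given
 * buffer.
 */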
1145 static long zcrypt_rng(char *buffer)
1146 {
1147         struct zcrypt_card *zc, *pref_zc;
1148         struct zcrypt_queue *zq, *pref_zq;
1149         unsigned int wgt = 0, pref_wgt = 0;
1150         unsigned int func_code;
1151         struct ap_message ap_msg;
1152         unsigned int domain;
1153         int qid = 0, rc = -ENODEV;
1154         struct module *mod;
1155
1156         trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
1157
1158         ap_init_message(&ap_msg);
1159         rc = get_rng_fc(&ap_msg, &func_code, &domain);
1160         if (rc)
1161                 goto out;
1162
1163         pref_zc = NULL;
1164         pref_zq = NULL;
1165         spin_lock(&zcrypt_list_lock);
1166         for_each_zcrypt_card(zc) {
1167                 /* Check for useable CCA card */
1168                 if (!zc->online || !zc->card->config ||
1169                     !(zc->card->functions & 0x10000000))
1170                         continue;
1171                 /* get weight index of the card device  */
1172                 wgt = zc->speed_rating[func_code];
1173                 if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
1174                         continue;
1175                 for_each_zcrypt_queue(zq, zc) {
1176                         /* check if device is useable and eligible */
1177                         if (!zq->online || !zq->ops->rng ||
1178                             !zq->queue->config)
1179                                 continue;
1180                         if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
1181                                 continue;
1182                         pref_zc = zc;
1183                         pref_zq = zq;
1184                         pref_wgt = wgt;
1185                 }
1186         }
1187         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1188         spin_unlock(&zcrypt_list_lock);
1189
1190         if (!pref_zq) {
1191                 rc = -ENODEV;
1192                 goto out;
1193         }
1194
1195         qid = pref_zq->queue->qid;
1196         rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
1197
1198         spin_lock(&zcrypt_list_lock);
1199         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1200         spin_unlock(&zcrypt_list_lock);
1201
1202 out:
1203         ap_release_message(&ap_msg);
1204         trace_s390_zcrypt_rep(buffer, func_code, rc,
1205                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1206         return rc;
1207 }
1208
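/*
 * Legacy status variant: fill the MAX_ZDEV_ENTRIES sized array
 * (cards >= MAX_ZDEV_CARDIDS are skipped) with hwtype, function bits,
 * qid and online state of each known queue.
 */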
1209 static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
1210 {
1211         struct zcrypt_card *zc;
1212         struct zcrypt_queue *zq;
1213         struct zcrypt_device_status *stat;
1214         int card, queue;
1215
1216         memset(devstatus, 0, MAX_ZDEV_ENTRIES
1217                * sizeof(struct zcrypt_device_status));
1218
1219         spin_lock(&zcrypt_list_lock);
1220         for_each_zcrypt_card(zc) {
1221                 for_each_zcrypt_queue(zq, zc) {
1222                         card = AP_QID_CARD(zq->queue->qid);
1223                         if (card >= MAX_ZDEV_CARDIDS)
1224                                 continue;
1225                         queue = AP_QID_QUEUE(zq->queue->qid);
1226                         stat = &devstatus[card * AP_DOMAINS + queue];
1227                         stat->hwtype = zc->card->ap_dev.device_type;
1228                         stat->functions = zc->card->functions >> 26;
1229                         stat->qid = zq->queue->qid;
1230                         stat->online = zq->online ? 0x01 : 0x00;
1231                 }
1232         }
1233         spin_unlock(&zcrypt_list_lock);
1234 }
1235
1236 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
1237 {
1238         struct zcrypt_card *zc;
1239         struct zcrypt_queue *zq;
1240         struct zcrypt_device_status_ext *stat;
1241         int card, queue;
1242
1243         memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
1244                * sizeof(struct zcrypt_device_status_ext));
1245
1246         spin_lock(&zcrypt_list_lock);
1247         for_each_zcrypt_card(zc) {
1248                 for_each_zcrypt_queue(zq, zc) {
1249                         card = AP_QID_CARD(zq->queue->qid);
1250                         queue = AP_QID_QUEUE(zq->queue->qid);
1251                         stat = &devstatus[card * AP_DOMAINS + queue];
1252                         stat->hwtype = zc->card->ap_dev.device_type;
1253                         stat->functions = zc->card->functions >> 26;
1254                         stat->qid = zq->queue->qid;
1255                         stat->online = zq->online ? 0x01 : 0x00;
1256                 }
1257         }
1258         spin_unlock(&zcrypt_list_lock);
1259 }
1260 EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
1261
1262 int zcrypt_device_status_ext(int card, int queue,
1263                              struct zcrypt_device_status_ext *devstat)
1264 {
1265         struct zcrypt_card *zc;
1266         struct zcrypt_queue *zq;
1267
1268         memset(devstat, 0, sizeof(*devstat));
1269
1270         spin_lock(&zcrypt_list_lock);
1271         for_each_zcrypt_card(zc) {
1272                 for_each_zcrypt_queue(zq, zc) {
1273                         if (card == AP_QID_CARD(zq->queue->qid) &&
1274                             queue == AP_QID_QUEUE(zq->queue->qid)) {
1275                                 devstat->hwtype = zc->card->ap_dev.device_type;
1276                                 devstat->functions = zc->card->functions >> 26;
1277                                 devstat->qid = zq->queue->qid;
1278                                 devstat->online = zq->online ? 0x01 : 0x00;
1279                                 spin_unlock(&zcrypt_list_lock);
1280                                 return 0;
1281                         }
1282                 }
1283         }
1284         spin_unlock(&zcrypt_list_lock);
1285
1286         return -ENODEV;
1287 }
1288 EXPORT_SYMBOL(zcrypt_device_status_ext);
1289
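/*
 * Build a per-adapter status byte array for the default domain:
 * the card's user space type if online, 0x0d otherwise.
 */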
1290 static void zcrypt_status_mask(char status[], size_t max_adapters)
1291 {
1292         struct zcrypt_card *zc;
1293         struct zcrypt_queue *zq;
1294         int card;
1295
1296         memset(status, 0, max_adapters);
1297         spin_lock(&zcrypt_list_lock);
1298         for_each_zcrypt_card(zc) {
1299                 for_each_zcrypt_queue(zq, zc) {
1300                         card = AP_QID_CARD(zq->queue->qid);
1301                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
1302                             || card >= max_adapters)
1303                                 continue;
1304                         status[card] = zc->online ? zc->user_space_type : 0x0d;
1305                 }
1306         }
1307         spin_unlock(&zcrypt_list_lock);
1308 }
1309
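/*
 * Report the per-adapter queue depth (pendingq plus requestq count)
 * for the queues on the default domain.
 */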
1310 static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
1311 {
1312         struct zcrypt_card *zc;
1313         struct zcrypt_queue *zq;
1314         int card;
1315
1316         memset(qdepth, 0, max_adapters);
1317         spin_lock(&zcrypt_list_lock);
1318         local_bh_disable();
1319         for_each_zcrypt_card(zc) {
1320                 for_each_zcrypt_queue(zq, zc) {
1321                         card = AP_QID_CARD(zq->queue->qid);
1322                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
1323                             || card >= max_adapters)
1324                                 continue;
1325                         spin_lock(&zq->queue->lock);
1326                         qdepth[card] =
1327                                 zq->queue->pendingq_count +
1328                                 zq->queue->requestq_count;
1329                         spin_unlock(&zq->queue->lock);
1330                 }
1331         }
1332         local_bh_enable();
1333         spin_unlock(&zcrypt_list_lock);
1334 }
1335
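/*
 * Report the per-adapter total request count for the default domain,
 * clamped to UINT_MAX.
 */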
1336 static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
1337 {
1338         struct zcrypt_card *zc;
1339         struct zcrypt_queue *zq;
1340         int card;
1341         u64 cnt;
1342
1343         memset(reqcnt, 0, sizeof(int) * max_adapters);
1344         spin_lock(&zcrypt_list_lock);
1345         local_bh_disable();
1346         for_each_zcrypt_card(zc) {
1347                 for_each_zcrypt_queue(zq, zc) {
1348                         card = AP_QID_CARD(zq->queue->qid);
1349                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
1350                             || card >= max_adapters)
1351                                 continue;
1352                         spin_lock(&zq->queue->lock);
1353                         cnt = zq->queue->total_request_count;
1354                         spin_unlock(&zq->queue->lock);
1355                         reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
1356                 }
1357         }
1358         local_bh_enable();
1359         spin_unlock(&zcrypt_list_lock);
1360 }
1361
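/*
 * Sum up the pendingq counters of all queues on the default domain.
 */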
1362 static int zcrypt_pendingq_count(void)
1363 {
1364         struct zcrypt_card *zc;
1365         struct zcrypt_queue *zq;
1366         int pendingq_count;
1367
1368         pendingq_count = 0;
1369         spin_lock(&zcrypt_list_lock);
1370         local_bh_disable();
1371         for_each_zcrypt_card(zc) {
1372                 for_each_zcrypt_queue(zq, zc) {
1373                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1374                                 continue;
1375                         spin_lock(&zq->queue->lock);
1376                         pendingq_count += zq->queue->pendingq_count;
1377                         spin_unlock(&zq->queue->lock);
1378                 }
1379         }
1380         local_bh_enable();
1381         spin_unlock(&zcrypt_list_lock);
1382         return pendingq_count;
1383 }
1384
1385 static int zcrypt_requestq_count(void)
1386 {
1387         struct zcrypt_card *zc;
1388         struct zcrypt_queue *zq;
1389         int requestq_count;
1390
1391         requestq_count = 0;
1392         spin_lock(&zcrypt_list_lock);
1393         local_bh_disable();
1394         for_each_zcrypt_card(zc) {
1395                 for_each_zcrypt_queue(zq, zc) {
1396                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1397                                 continue;
1398                         spin_lock(&zq->queue->lock);
1399                         requestq_count += zq->queue->requestq_count;
1400                         spin_unlock(&zq->queue->lock);
1401                 }
1402         }
1403         local_bh_enable();
1404         spin_unlock(&zcrypt_list_lock);
1405         return requestq_count;
1406 }
1407
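/*
 * ICARSAMODEXPO ioctl: copy the modexpo request from userspace, retry
 * on -EAGAIN (up to TRACK_AGAIN_MAX times, plus one rescan triggered
 * retry round on -ENODEV) and copy the output length back to the
 * user buffer.
 */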
1408 static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
1409 {
1410         int rc;
1411         struct zcrypt_track tr;
1412         struct ica_rsa_modexpo mex;
1413         struct ica_rsa_modexpo __user *umex = (void __user *) arg;
1414
1415         memset(&tr, 0, sizeof(tr));
1416         if (copy_from_user(&mex, umex, sizeof(mex)))
1417                 return -EFAULT;
1418
1419 #ifdef CONFIG_ZCRYPT_DEBUG
1420         if (mex.inputdatalength & (1U << 31)) {
1421                 if (!capable(CAP_SYS_ADMIN))
1422                         return -EPERM;
1423                 tr.fi.cmd = (u16)(mex.inputdatalength >> 16);
1424         }
1425         mex.inputdatalength &= 0x0000FFFF;
1426 #endif
1427
1428         do {
1429                 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1430                 if (rc == -EAGAIN)
1431                         tr.again_counter++;
1432 #ifdef CONFIG_ZCRYPT_DEBUG
1433                 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1434                         break;
1435 #endif
1436         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1437         /* on failure: retry once again after a requested rescan */
1438         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1439                 do {
1440                         rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1441                         if (rc == -EAGAIN)
1442                                 tr.again_counter++;
1443                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1444         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1445                 rc = -EIO;
1446         if (rc) {
1447                 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
1448                 return rc;
1449         }
1450         return put_user(mex.outputdatalength, &umex->outputdatalength);
1451 }
1452
1453 static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
1454 {
1455         int rc;
1456         struct zcrypt_track tr;
1457         struct ica_rsa_modexpo_crt crt;
1458         struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
1459
1460         memset(&tr, 0, sizeof(tr));
1461         if (copy_from_user(&crt, ucrt, sizeof(crt)))
1462                 return -EFAULT;
1463
1464 #ifdef CONFIG_ZCRYPT_DEBUG
1465         if (crt.inputdatalength & (1U << 31)) {
1466                 if (!capable(CAP_SYS_ADMIN))
1467                         return -EPERM;
1468                 tr.fi.cmd = (u16)(crt.inputdatalength >> 16);
1469         }
1470         crt.inputdatalength &= 0x0000FFFF;
1471 #endif
1472
1473         do {
1474                 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1475                 if (rc == -EAGAIN)
1476                         tr.again_counter++;
1477 #ifdef CONFIG_ZCRYPT_DEBUG
1478                 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1479                         break;
1480 #endif
1481         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1482         /* on failure: retry once again after a requested rescan */
1483         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1484                 do {
1485                         rc = zcrypt_rsa_crt(perms, &tr, &crt);
1486                         if (rc == -EAGAIN)
1487                                 tr.again_counter++;
1488                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1489         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1490                 rc = -EIO;
1491         if (rc) {
1492                 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
1493                 return rc;
1494         }
1495         return put_user(crt.outputdatalength, &ucrt->outputdatalength);
1496 }
1497
1498 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
1499 {
1500         int rc;
1501         struct ica_xcRB xcRB;
1502         struct zcrypt_track tr;
1503         struct ica_xcRB __user *uxcRB = (void __user *) arg;
1504
1505         memset(&tr, 0, sizeof(tr));
1506         if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
1507                 return -EFAULT;
1508
1509 #ifdef CONFIG_ZCRYPT_DEBUG
1510         if (xcRB.status & (1U << 31)) {
1511                 if (!capable(CAP_SYS_ADMIN))
1512                         return -EPERM;
1513                 tr.fi.cmd = (u16)(xcRB.status >> 16);
1514         }
1515         xcRB.status &= 0x0000FFFF;
1516 #endif
1517
1518         do {
1519                 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
1520                 if (rc == -EAGAIN)
1521                         tr.again_counter++;
1522 #ifdef CONFIG_ZCRYPT_DEBUG
1523                 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1524                         break;
1525 #endif
1526         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1527         /* on failure: retry once again after a requested rescan */
1528         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1529                 do {
1530                         rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
1531                         if (rc == -EAGAIN)
1532                                 tr.again_counter++;
1533                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1534         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1535                 rc = -EIO;
1536         if (rc)
1537                 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
1538                            rc, xcRB.status);
1539         if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
1540                 return -EFAULT;
1541         return rc;
1542 }
1543
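/*
 * zsendep11cprb_ioctl() - handle the ZSENDEP11CPRB ioctl: copy the
 * struct ep11_urb from user space, pass it to _zcrypt_send_ep11_cprb()
 * with the retry and rescan scheme described above, and copy the urb
 * back to user space. With CONFIG_ZCRYPT_DEBUG, bit 63 of req_len marks
 * a fault injection command in the upper 16 bits (CAP_SYS_ADMIN only).
 */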
1544 static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
1545 {
1546         int rc;
1547         struct ep11_urb xcrb;
1548         struct zcrypt_track tr;
1549         struct ep11_urb __user *uxcrb = (void __user *)arg;
1550
1551         memset(&tr, 0, sizeof(tr));
1552         if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1553                 return -EFAULT;
1554
1555 #ifdef CONFIG_ZCRYPT_DEBUG
1556         if (xcrb.req_len & (1ULL << 63)) {
1557                 if (!capable(CAP_SYS_ADMIN))
1558                         return -EPERM;
1559                 tr.fi.cmd = (u16)(xcrb.req_len >> 48);
1560         }
1561         xcrb.req_len &= 0x0000FFFFFFFFFFFFULL;
1562 #endif
1563
1564         do {
1565                 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1566                 if (rc == -EAGAIN)
1567                         tr.again_counter++;
1568 #ifdef CONFIG_ZCRYPT_DEBUG
1569                 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1570                         break;
1571 #endif
1572         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1573         /* on failure: retry once again after a requested rescan */
1574         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1575                 do {
1576                         rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1577                         if (rc == -EAGAIN)
1578                                 tr.again_counter++;
1579                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1580         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1581                 rc = -EIO;
1582         if (rc)
1583                 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
1584         if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1585                 return -EFAULT;
1586         return rc;
1587 }
1588
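/*
 * Main ioctl dispatcher for the z90crypt misc device and the zcdn
 * nodes: after checking the caller's AP permissions via
 * zcrypt_check_ioctl(), the request ioctls are handed to the helpers
 * above and the status and statistics ioctls (including the deprecated
 * 64-adapter variants) are answered directly. Unknown commands return
 * -ENOIOCTLCMD.
 */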
1589 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
1590                                   unsigned long arg)
1591 {
1592         int rc;
1593         struct ap_perms *perms =
1594                 (struct ap_perms *) filp->private_data;
1595
1596         rc = zcrypt_check_ioctl(perms, cmd);
1597         if (rc)
1598                 return rc;
1599
1600         switch (cmd) {
1601         case ICARSAMODEXPO:
1602                 return icarsamodexpo_ioctl(perms, arg);
1603         case ICARSACRT:
1604                 return icarsacrt_ioctl(perms, arg);
1605         case ZSECSENDCPRB:
1606                 return zsecsendcprb_ioctl(perms, arg);
1607         case ZSENDEP11CPRB:
1608                 return zsendep11cprb_ioctl(perms, arg);
1609         case ZCRYPT_DEVICE_STATUS: {
1610                 struct zcrypt_device_status_ext *device_status;
1611                 size_t total_size = MAX_ZDEV_ENTRIES_EXT
1612                         * sizeof(struct zcrypt_device_status_ext);
1613
1614                 device_status = kzalloc(total_size, GFP_KERNEL);
1615                 if (!device_status)
1616                         return -ENOMEM;
1617                 zcrypt_device_status_mask_ext(device_status);
1618                 if (copy_to_user((char __user *) arg, device_status,
1619                                  total_size))
1620                         rc = -EFAULT;
1621                 kfree(device_status);
1622                 return rc;
1623         }
1624         case ZCRYPT_STATUS_MASK: {
1625                 char status[AP_DEVICES];
1626
1627                 zcrypt_status_mask(status, AP_DEVICES);
1628                 if (copy_to_user((char __user *) arg, status, sizeof(status)))
1629                         return -EFAULT;
1630                 return 0;
1631         }
1632         case ZCRYPT_QDEPTH_MASK: {
1633                 char qdepth[AP_DEVICES];
1634
1635                 zcrypt_qdepth_mask(qdepth, AP_DEVICES);
1636                 if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
1637                         return -EFAULT;
1638                 return 0;
1639         }
1640         case ZCRYPT_PERDEV_REQCNT: {
1641                 u32 *reqcnt;
1642
1643                 reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
1644                 if (!reqcnt)
1645                         return -ENOMEM;
1646                 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
1647                 if (copy_to_user((int __user *) arg, reqcnt,
1648                                  sizeof(u32) * AP_DEVICES))
1649                         rc = -EFAULT;
1650                 kfree(reqcnt);
1651                 return rc;
1652         }
1653         case Z90STAT_REQUESTQ_COUNT:
1654                 return put_user(zcrypt_requestq_count(), (int __user *) arg);
1655         case Z90STAT_PENDINGQ_COUNT:
1656                 return put_user(zcrypt_pendingq_count(), (int __user *) arg);
1657         case Z90STAT_TOTALOPEN_COUNT:
1658                 return put_user(atomic_read(&zcrypt_open_count),
1659                                 (int __user *) arg);
1660         case Z90STAT_DOMAIN_INDEX:
1661                 return put_user(ap_domain_index, (int __user *) arg);
1662         /*
1663          * Deprecated ioctls
1664          */
1665         case ZDEVICESTATUS: {
1666                 /* the old ioctl supports only 64 adapters */
1667                 struct zcrypt_device_status *device_status;
1668                 size_t total_size = MAX_ZDEV_ENTRIES
1669                         * sizeof(struct zcrypt_device_status);
1670
1671                 device_status = kzalloc(total_size, GFP_KERNEL);
1672                 if (!device_status)
1673                         return -ENOMEM;
1674                 zcrypt_device_status_mask(device_status);
1675                 if (copy_to_user((char __user *) arg, device_status,
1676                                  total_size))
1677                         rc = -EFAULT;
1678                 kfree(device_status);
1679                 return rc;
1680         }
1681         case Z90STAT_STATUS_MASK: {
1682                 /* the old ioctl supports only 64 adapters */
1683                 char status[MAX_ZDEV_CARDIDS];
1684
1685                 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
1686                 if (copy_to_user((char __user *) arg, status, sizeof(status)))
1687                         return -EFAULT;
1688                 return 0;
1689         }
1690         case Z90STAT_QDEPTH_MASK: {
1691                 /* the old ioctl supports only 64 adapters */
1692                 char qdepth[MAX_ZDEV_CARDIDS];
1693
1694                 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
1695                 if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
1696                         return -EFAULT;
1697                 return 0;
1698         }
1699         case Z90STAT_PERDEV_REQCNT: {
1700                 /* the old ioctl supports only 64 adapters */
1701                 u32 reqcnt[MAX_ZDEV_CARDIDS];
1702
1703                 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
1704                 if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
1705                         return -EFAULT;
1706                 return 0;
1707         }
1708         /* unknown ioctl number */
1709         default:
1710                 ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
1711                 return -ENOIOCTLCMD;
1712         }
1713 }
1714
1715 #ifdef CONFIG_COMPAT
1716 /*
1717  * ioctl32 conversion routines
1718  */
1719 struct compat_ica_rsa_modexpo {
1720         compat_uptr_t   inputdata;
1721         unsigned int    inputdatalength;
1722         compat_uptr_t   outputdata;
1723         unsigned int    outputdatalength;
1724         compat_uptr_t   b_key;
1725         compat_uptr_t   n_modulus;
1726 };
1727
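/*
 * trans_modexpo32() - 32-bit compat path for ICARSAMODEXPO: convert the
 * struct compat_ica_rsa_modexpo (compat_uptr_t members) to the native
 * struct ica_rsa_modexpo, run the same retry and rescan sequence as the
 * 64-bit path and write outputdatalength back to the 32-bit structure.
 */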
1728 static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
1729                             unsigned int cmd, unsigned long arg)
1730 {
1731         struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
1732         struct compat_ica_rsa_modexpo mex32;
1733         struct ica_rsa_modexpo mex64;
1734         struct zcrypt_track tr;
1735         long rc;
1736
1737         memset(&tr, 0, sizeof(tr));
1738         if (copy_from_user(&mex32, umex32, sizeof(mex32)))
1739                 return -EFAULT;
1740         mex64.inputdata = compat_ptr(mex32.inputdata);
1741         mex64.inputdatalength = mex32.inputdatalength;
1742         mex64.outputdata = compat_ptr(mex32.outputdata);
1743         mex64.outputdatalength = mex32.outputdatalength;
1744         mex64.b_key = compat_ptr(mex32.b_key);
1745         mex64.n_modulus = compat_ptr(mex32.n_modulus);
1746         do {
1747                 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1748                 if (rc == -EAGAIN)
1749                         tr.again_counter++;
1750         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1751         /* on failure: retry once again after a requested rescan */
1752         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1753                 do {
1754                         rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1755                         if (rc == -EAGAIN)
1756                                 tr.again_counter++;
1757                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1758         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1759                 rc = -EIO;
1760         if (rc)
1761                 return rc;
1762         return put_user(mex64.outputdatalength,
1763                         &umex32->outputdatalength);
1764 }
1765
1766 struct compat_ica_rsa_modexpo_crt {
1767         compat_uptr_t   inputdata;
1768         unsigned int    inputdatalength;
1769         compat_uptr_t   outputdata;
1770         unsigned int    outputdatalength;
1771         compat_uptr_t   bp_key;
1772         compat_uptr_t   bq_key;
1773         compat_uptr_t   np_prime;
1774         compat_uptr_t   nq_prime;
1775         compat_uptr_t   u_mult_inv;
1776 };
1777
1778 static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
1779                                 unsigned int cmd, unsigned long arg)
1780 {
1781         struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
1782         struct compat_ica_rsa_modexpo_crt crt32;
1783         struct ica_rsa_modexpo_crt crt64;
1784         struct zcrypt_track tr;
1785         long rc;
1786
1787         memset(&tr, 0, sizeof(tr));
1788         if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
1789                 return -EFAULT;
1790         crt64.inputdata = compat_ptr(crt32.inputdata);
1791         crt64.inputdatalength = crt32.inputdatalength;
1792         crt64.outputdata = compat_ptr(crt32.outputdata);
1793         crt64.outputdatalength = crt32.outputdatalength;
1794         crt64.bp_key = compat_ptr(crt32.bp_key);
1795         crt64.bq_key = compat_ptr(crt32.bq_key);
1796         crt64.np_prime = compat_ptr(crt32.np_prime);
1797         crt64.nq_prime = compat_ptr(crt32.nq_prime);
1798         crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
1799         do {
1800                 rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1801                 if (rc == -EAGAIN)
1802                         tr.again_counter++;
1803         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1804         /* on failure: retry once again after a requested rescan */
1805         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1806                 do {
1807                         rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1808                         if (rc == -EAGAIN)
1809                                 tr.again_counter++;
1810                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1811         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1812                 rc = -EIO;
1813         if (rc)
1814                 return rc;
1815         return put_user(crt64.outputdatalength,
1816                         &ucrt32->outputdatalength);
1817 }
1818
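/*
 * 32-bit layout of struct ica_xcRB: each padding array is sized
 * 16 - sizeof(compat_uptr_t), so that padding plus pointer always
 * occupies 16 bytes and the packed field offsets stay in line with the
 * 64-bit struct ica_xcRB.
 */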
1819 struct compat_ica_xcRB {
1820         unsigned short  agent_ID;
1821         unsigned int    user_defined;
1822         unsigned short  request_ID;
1823         unsigned int    request_control_blk_length;
1824         unsigned char   padding1[16 - sizeof(compat_uptr_t)];
1825         compat_uptr_t   request_control_blk_addr;
1826         unsigned int    request_data_length;
1827         char            padding2[16 - sizeof(compat_uptr_t)];
1828         compat_uptr_t   request_data_address;
1829         unsigned int    reply_control_blk_length;
1830         char            padding3[16 - sizeof(compat_uptr_t)];
1831         compat_uptr_t   reply_control_blk_addr;
1832         unsigned int    reply_data_length;
1833         char            padding4[16 - sizeof(compat_uptr_t)];
1834         compat_uptr_t   reply_data_addr;
1835         unsigned short  priority_window;
1836         unsigned int    status;
1837 } __packed;
1838
1839 static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
1840                          unsigned int cmd, unsigned long arg)
1841 {
1842         struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
1843         struct compat_ica_xcRB xcRB32;
1844         struct zcrypt_track tr;
1845         struct ica_xcRB xcRB64;
1846         long rc;
1847
1848         memset(&tr, 0, sizeof(tr));
1849         if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
1850                 return -EFAULT;
1851         xcRB64.agent_ID = xcRB32.agent_ID;
1852         xcRB64.user_defined = xcRB32.user_defined;
1853         xcRB64.request_ID = xcRB32.request_ID;
1854         xcRB64.request_control_blk_length =
1855                 xcRB32.request_control_blk_length;
1856         xcRB64.request_control_blk_addr =
1857                 compat_ptr(xcRB32.request_control_blk_addr);
1858         xcRB64.request_data_length =
1859                 xcRB32.request_data_length;
1860         xcRB64.request_data_address =
1861                 compat_ptr(xcRB32.request_data_address);
1862         xcRB64.reply_control_blk_length =
1863                 xcRB32.reply_control_blk_length;
1864         xcRB64.reply_control_blk_addr =
1865                 compat_ptr(xcRB32.reply_control_blk_addr);
1866         xcRB64.reply_data_length = xcRB32.reply_data_length;
1867         xcRB64.reply_data_addr =
1868                 compat_ptr(xcRB32.reply_data_addr);
1869         xcRB64.priority_window = xcRB32.priority_window;
1870         xcRB64.status = xcRB32.status;
1871         do {
1872                 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
1873                 if (rc == -EAGAIN)
1874                         tr.again_counter++;
1875         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1876         /* on failure: retry once again after a requested rescan */
1877         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1878                 do {
1879                         rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
1880                         if (rc == -EAGAIN)
1881                                 tr.again_counter++;
1882                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1883         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1884                 rc = -EIO;
1885         xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
1886         xcRB32.reply_data_length = xcRB64.reply_data_length;
1887         xcRB32.status = xcRB64.status;
1888         if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
1889                 return -EFAULT;
1890         return rc;
1891 }
1892
1893 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
1894                                 unsigned long arg)
1895 {
1896         int rc;
1897         struct ap_perms *perms =
1898                 (struct ap_perms *) filp->private_data;
1899
1900         rc = zcrypt_check_ioctl(perms, cmd);
1901         if (rc)
1902                 return rc;
1903
1904         if (cmd == ICARSAMODEXPO)
1905                 return trans_modexpo32(perms, filp, cmd, arg);
1906         if (cmd == ICARSACRT)
1907                 return trans_modexpo_crt32(perms, filp, cmd, arg);
1908         if (cmd == ZSECSENDCPRB)
1909                 return trans_xcRB32(perms, filp, cmd, arg);
1910         return zcrypt_unlocked_ioctl(filp, cmd, arg);
1911 }
1912 #endif
1913
1914 /*
1915  * Misc device file operations.
1916  */
1917 static const struct file_operations zcrypt_fops = {
1918         .owner          = THIS_MODULE,
1919         .read           = zcrypt_read,
1920         .write          = zcrypt_write,
1921         .unlocked_ioctl = zcrypt_unlocked_ioctl,
1922 #ifdef CONFIG_COMPAT
1923         .compat_ioctl   = zcrypt_compat_ioctl,
1924 #endif
1925         .open           = zcrypt_open,
1926         .release        = zcrypt_release,
1927         .llseek         = no_llseek,
1928 };
1929
1930 /*
1931  * Misc device.
1932  */
1933 static struct miscdevice zcrypt_misc_device = {
1934         .minor      = MISC_DYNAMIC_MINOR,
1935         .name       = "z90crypt",
1936         .fops       = &zcrypt_fops,
1937 };
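
/*
 * Illustrative user space usage sketch (not part of the driver; error
 * handling and buffer setup omitted): requests are issued as ioctls on
 * the z90crypt misc device defined above, for example:
 *
 *	struct ica_rsa_modexpo mex;
 *	int fd;
 *
 *	memset(&mex, 0, sizeof(mex));
 *	(caller fills in b_key, n_modulus and the input/output buffers)
 *	fd = open("/dev/z90crypt", O_RDWR);
 *	if (ioctl(fd, ICARSAMODEXPO, &mex) == 0)
 *		(mex.outputdatalength bytes of result are available)
 */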
1938
1939 static int zcrypt_rng_device_count;
1940 static u32 *zcrypt_rng_buffer;
1941 static int zcrypt_rng_buffer_index;
1942 static DEFINE_MUTEX(zcrypt_rng_mutex);
1943
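/*
 * hwrng read callback: when the buffer is empty, refill the page sized
 * zcrypt_rng_buffer via zcrypt_rng() (retrying once after a requested
 * rescan on -ENODEV) and then hand out one 32-bit word per call from
 * the end of the filled portion.
 */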
1944 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1945 {
1946         int rc;
1947
1948         /*
1949          * We don't need locking here because the RNG API guarantees serialized
1950          * read method calls.
1951          */
1952         if (zcrypt_rng_buffer_index == 0) {
1953                 rc = zcrypt_rng((char *) zcrypt_rng_buffer);
1954                 /* on failure: retry once again after a requested rescan */
1955                 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1956                         rc = zcrypt_rng((char *) zcrypt_rng_buffer);
1957                 if (rc < 0)
1958                         return -EIO;
1959                 zcrypt_rng_buffer_index = rc / sizeof(*data);
1960         }
1961         *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
1962         return sizeof(*data);
1963 }
1964
1965 static struct hwrng zcrypt_rng_dev = {
1966         .name           = "zcrypt",
1967         .data_read      = zcrypt_rng_data_read,
1968         .quality        = 990,
1969 };
1970
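/*
 * Reference counted registration of the "zcrypt" hwrng device: the
 * first caller allocates the buffer page and registers the device (with
 * quality forced to 0 if the hwrng_seed module parameter is off);
 * subsequent callers only increment the counter.
 */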
1971 int zcrypt_rng_device_add(void)
1972 {
1973         int rc = 0;
1974
1975         mutex_lock(&zcrypt_rng_mutex);
1976         if (zcrypt_rng_device_count == 0) {
1977                 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
1978                 if (!zcrypt_rng_buffer) {
1979                         rc = -ENOMEM;
1980                         goto out;
1981                 }
1982                 zcrypt_rng_buffer_index = 0;
1983                 if (!zcrypt_hwrng_seed)
1984                         zcrypt_rng_dev.quality = 0;
1985                 rc = hwrng_register(&zcrypt_rng_dev);
1986                 if (rc)
1987                         goto out_free;
1988                 zcrypt_rng_device_count = 1;
1989         } else
1990                 zcrypt_rng_device_count++;
1991         mutex_unlock(&zcrypt_rng_mutex);
1992         return 0;
1993
1994 out_free:
1995         free_page((unsigned long) zcrypt_rng_buffer);
1996 out:
1997         mutex_unlock(&zcrypt_rng_mutex);
1998         return rc;
1999 }
2000
2001 void zcrypt_rng_device_remove(void)
2002 {
2003         mutex_lock(&zcrypt_rng_mutex);
2004         zcrypt_rng_device_count--;
2005         if (zcrypt_rng_device_count == 0) {
2006                 hwrng_unregister(&zcrypt_rng_dev);
2007                 free_page((unsigned long) zcrypt_rng_buffer);
2008         }
2009         mutex_unlock(&zcrypt_rng_mutex);
2010 }
2011
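/*
 * Register the s390 debug feature "zcrypt" with a sprintf view; the
 * default debug level is DBF_ERR.
 */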
2012 int __init zcrypt_debug_init(void)
2013 {
2014         zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
2015                                          DBF_MAX_SPRINTF_ARGS * sizeof(long));
2016         debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
2017         debug_set_level(zcrypt_dbf_info, DBF_ERR);
2018
2019         return 0;
2020 }
2021
2022 void zcrypt_debug_exit(void)
2023 {
2024         debug_unregister(zcrypt_dbf_info);
2025 }
2026
2027 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2028
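/*
 * Set up the infrastructure for the additional zcrypt device nodes:
 * create the "zcrypt" device class with zcdn_device_release as release
 * function, reserve ZCRYPT_MAX_MINOR_NODES char device minors, register
 * zcrypt_cdev with zcrypt_fops and create the zcdn_create and
 * zcdn_destroy class attribute files.
 */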
2029 static int __init zcdn_init(void)
2030 {
2031         int rc;
2032
2033         /* create a new class 'zcrypt' */
2034         zcrypt_class = class_create(THIS_MODULE, ZCRYPT_NAME);
2035         if (IS_ERR(zcrypt_class)) {
2036                 rc = PTR_ERR(zcrypt_class);
2037                 goto out_class_create_failed;
2038         }
2039         zcrypt_class->dev_release = zcdn_device_release;
2040
2041         /* alloc device minor range */
2042         rc = alloc_chrdev_region(&zcrypt_devt,
2043                                  0, ZCRYPT_MAX_MINOR_NODES,
2044                                  ZCRYPT_NAME);
2045         if (rc)
2046                 goto out_alloc_chrdev_failed;
2047
2048         cdev_init(&zcrypt_cdev, &zcrypt_fops);
2049         zcrypt_cdev.owner = THIS_MODULE;
2050         rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2051         if (rc)
2052                 goto out_cdev_add_failed;
2053
2054         /* create the class attribute files for zcdn create and destroy */
2055         rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
2056         if (rc)
2057                 goto out_class_create_file_1_failed;
2058         rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
2059         if (rc)
2060                 goto out_class_create_file_2_failed;
2061
2062         return 0;
2063
2064 out_class_create_file_2_failed:
2065         class_remove_file(zcrypt_class, &class_attr_zcdn_create);
2066 out_class_create_file_1_failed:
2067         cdev_del(&zcrypt_cdev);
2068 out_cdev_add_failed:
2069         unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2070 out_alloc_chrdev_failed:
2071         class_destroy(zcrypt_class);
2072 out_class_create_failed:
2073         return rc;
2074 }
2075
2076 static void zcdn_exit(void)
2077 {
2078         class_remove_file(zcrypt_class, &class_attr_zcdn_create);
2079         class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
2080         zcdn_destroy_all();
2081         cdev_del(&zcrypt_cdev);
2082         unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2083         class_destroy(zcrypt_class);
2084 }
2085
2086 #endif
2087
2088 /**
2089  * zcrypt_api_init(): Module initialization.
2090  *
2091  * Set up debugging, the optional zcdn nodes and the z90crypt misc device.
2092  */
2093 int __init zcrypt_api_init(void)
2094 {
2095         int rc;
2096
2097         rc = zcrypt_debug_init();
2098         if (rc)
2099                 goto out;
2100
2101 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2102         rc = zcdn_init();
2103         if (rc)
2104                 goto out;
2105 #endif
2106
2107         /* Register the z90crypt misc device (the request "sprayer"). */
2108         rc = misc_register(&zcrypt_misc_device);
2109         if (rc < 0)
2110                 goto out_misc_register_failed;
2111
2112         zcrypt_msgtype6_init();
2113         zcrypt_msgtype50_init();
2114
2115         return 0;
2116
2117 out_misc_register_failed:
2118 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2119         zcdn_exit();
2120 #endif
2121         zcrypt_debug_exit();
2122 out:
2123         return rc;
2124 }
2125
2126 /**
2127  * zcrypt_api_exit(): Module termination.
2128  *
2129  * Tear down the device nodes, message type handlers and debug facility.
2130  */
2131 void __exit zcrypt_api_exit(void)
2132 {
2133 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2134         zcdn_exit();
2135 #endif
2136         misc_deregister(&zcrypt_misc_device);
2137         zcrypt_msgtype6_exit();
2138         zcrypt_msgtype50_exit();
2139         zcrypt_ccamisc_exit();
2140         zcrypt_ep11misc_exit();
2141         zcrypt_debug_exit();
2142 }
2143
2144 module_init(zcrypt_api_init);
2145 module_exit(zcrypt_api_exit);