arch/powerpc/include/asm/plpar_wrappers.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
#define _ASM_POWERPC_PLPAR_WRAPPERS_H

#ifdef CONFIG_PPC_PSERIES

#include <linux/string.h>
#include <linux/irqflags.h>
#include <linux/delay.h>

#include <asm/hvcall.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/page.h>

static inline long poll_pending(void)
{
        return plpar_hcall_norets(H_POLL_PENDING);
}

static inline u8 get_cede_latency_hint(void)
{
        return get_lppaca()->cede_latency_hint;
}

static inline void set_cede_latency_hint(u8 latency_hint)
{
        get_lppaca()->cede_latency_hint = latency_hint;
}

static inline long cede_processor(void)
{
        /*
         * We cannot call tracepoints inside RCU idle regions which
         * means we must not trace H_CEDE.
         */
        return plpar_hcall_norets_notrace(H_CEDE);
}

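/*
 * Cede the processor using the given latency hint, restoring the caller's
 * previous hint before returning.
 */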
static inline long extended_cede_processor(unsigned long latency_hint)
{
        long rc;
        u8 old_latency_hint = get_cede_latency_hint();

        set_cede_latency_hint(latency_hint);

        rc = cede_processor();

        /* Ensure that H_CEDE returns with IRQs on */
        if (WARN_ON(IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && !(mfmsr() & MSR_EE)))
                __hard_irq_enable();

        set_cede_latency_hint(old_latency_hint);

        return rc;
}

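/*
 * Common wrapper for H_REGISTER_VPA: flags selects the register/deregister
 * sub-function (VPA, SLB shadow buffer or dispatch trace log) used by the
 * helpers below.
 */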
static inline long vpa_call(unsigned long flags, unsigned long cpu,
                unsigned long vpa)
{
        flags = flags << H_VPA_FUNC_SHIFT;

        return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}

static inline long unregister_vpa(unsigned long cpu)
{
        return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}

static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
        return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}

static inline long unregister_slb_shadow(unsigned long cpu)
{
        return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}

static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
        return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}

static inline long unregister_dtl(unsigned long cpu)
{
        return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}

static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
        return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}

extern void vpa_init(int cpu);

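/* Insert an HPTE via H_ENTER; the slot actually used is returned in *slot. */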
static inline long plpar_pte_enter(unsigned long flags,
                unsigned long hpte_group, unsigned long hpte_v,
                unsigned long hpte_r, unsigned long *slot)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

        rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);

        *slot = retbuf[0];

        return rc;
}

static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
                unsigned long avpn, unsigned long *old_pteh_ret,
                unsigned long *old_ptel_ret)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

        rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);

        *old_pteh_ret = retbuf[0];
        *old_ptel_ret = retbuf[1];

        return rc;
}

/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
                unsigned long avpn, unsigned long *old_pteh_ret,
                unsigned long *old_ptel_ret)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

        rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);

        *old_pteh_ret = retbuf[0];
        *old_ptel_ret = retbuf[1];

        return rc;
}

static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
                unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

        rc = plpar_hcall(H_READ, retbuf, flags, ptex);

        *old_pteh_ret = retbuf[0];
        *old_ptel_ret = retbuf[1];

        return rc;
}

/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
                unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

        rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);

        *old_pteh_ret = retbuf[0];
        *old_ptel_ret = retbuf[1];

        return rc;
}

/*
 * ptes must be 8*sizeof(unsigned long)
 */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
                                    unsigned long *ptes)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

        rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);

        memcpy(ptes, retbuf, 8*sizeof(unsigned long));

        return rc;
}

/*
 * plpar_pte_read_4_raw can be called in real mode.
 * ptes must be 8*sizeof(unsigned long)
 */
static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
                                        unsigned long *ptes)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

        rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);

        memcpy(ptes, retbuf, 8*sizeof(unsigned long));

        return rc;
}

static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
                unsigned long avpn)
{
        return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
}

static inline long plpar_resize_hpt_prepare(unsigned long flags,
                                            unsigned long shift)
{
        return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
}

static inline long plpar_resize_hpt_commit(unsigned long flags,
                                           unsigned long shift)
{
        return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
}

static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
                unsigned long *tce_ret)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

        rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);

        *tce_ret = retbuf[0];

        return rc;
}

static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
                unsigned long tceval)
{
        return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}

static inline long plpar_tce_put_indirect(unsigned long liobn,
                unsigned long ioba, unsigned long page, unsigned long count)
{
        return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
}

static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
                unsigned long tceval, unsigned long count)
{
        return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
}

/* Set various resource mode parameters */
static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
                unsigned long value1, unsigned long value2)
{
        return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
}

/*
 * Enable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_reloc_on_exceptions(void)
{
        /* mflags = 3: Exceptions at 0xC000000000004000 */
        return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}

/*
 * Disable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long disable_reloc_on_exceptions(void)
{
        return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}

/*
 * Take exceptions in big endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_big_endian_exceptions(void)
{
        /* mflags = 0: big endian exceptions */
        return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
}

/*
 * Take exceptions in little endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_little_endian_exceptions(void)
{
        /* mflags = 1: little endian exceptions */
        return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
}

static inline long plpar_set_ciabr(unsigned long ciabr)
{
        return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
}

static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
        return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0);
}

static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1)
{
        return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1);
}

static inline long plpar_signal_sys_reset(long cpu)
{
        return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
}

static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
        if (rc == H_SUCCESS) {
                p->character = retbuf[0];
                p->behaviour = retbuf[1];
        }

        return rc;
}

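/*
 * Create a guest and return its ID in *guest_id.  H_BUSY and long-busy
 * returns are retried, passing the continuation token back to the
 * hypervisor until the call completes.
 */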
static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        unsigned long token;
        long rc;

        token = -1UL;
        do {
                rc = plpar_hcall(H_GUEST_CREATE, retbuf, flags, token);
                if (rc == H_SUCCESS)
                        *guest_id = retbuf[0];

                if (rc == H_BUSY) {
                        token = retbuf[0];
                        cond_resched();
                }

                if (H_IS_LONG_BUSY(rc)) {
                        token = retbuf[0];
                        msleep(get_longbusy_msecs(rc));
                        rc = H_BUSY;
                }

        } while (rc == H_BUSY);

        return rc;
}

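/* Create vCPU vcpu_id in guest guest_id, retrying while the hypervisor is busy. */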
static inline long plpar_guest_create_vcpu(unsigned long flags,
                                           unsigned long guest_id,
                                           unsigned long vcpu_id)
{
        long rc;

        do {
                rc = plpar_hcall_norets(H_GUEST_CREATE_VCPU, 0, guest_id, vcpu_id);

                if (rc == H_BUSY)
                        cond_resched();

                if (H_IS_LONG_BUSY(rc)) {
                        msleep(get_longbusy_msecs(rc));
                        rc = H_BUSY;
                }

        } while (rc == H_BUSY);

        return rc;
}

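/*
 * Set guest/vCPU state from the buffer at data_buffer.  Busy returns are
 * retried; on any H_INVALID_ELEMENT_* failure the index of the offending
 * element is returned in *failed_index.
 */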
static inline long plpar_guest_set_state(unsigned long flags,
                                         unsigned long guest_id,
                                         unsigned long vcpu_id,
                                         unsigned long data_buffer,
                                         unsigned long data_size,
                                         unsigned long *failed_index)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        while (true) {
                rc = plpar_hcall(H_GUEST_SET_STATE, retbuf, flags, guest_id,
                                 vcpu_id, data_buffer, data_size);

                if (rc == H_BUSY) {
                        cpu_relax();
                        continue;
                }

                if (H_IS_LONG_BUSY(rc)) {
                        mdelay(get_longbusy_msecs(rc));
                        continue;
                }

                if (rc == H_INVALID_ELEMENT_ID)
                        *failed_index = retbuf[0];
                else if (rc == H_INVALID_ELEMENT_SIZE)
                        *failed_index = retbuf[0];
                else if (rc == H_INVALID_ELEMENT_VALUE)
                        *failed_index = retbuf[0];

                break;
        }

        return rc;
}

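/* As above, but reads guest/vCPU state into the buffer at data_buffer. */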
static inline long plpar_guest_get_state(unsigned long flags,
                                         unsigned long guest_id,
                                         unsigned long vcpu_id,
                                         unsigned long data_buffer,
                                         unsigned long data_size,
                                         unsigned long *failed_index)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        while (true) {
                rc = plpar_hcall(H_GUEST_GET_STATE, retbuf, flags, guest_id,
                                 vcpu_id, data_buffer, data_size);

                if (rc == H_BUSY) {
                        cpu_relax();
                        continue;
                }

                if (H_IS_LONG_BUSY(rc)) {
                        mdelay(get_longbusy_msecs(rc));
                        continue;
                }

                if (rc == H_INVALID_ELEMENT_ID)
                        *failed_index = retbuf[0];
                else if (rc == H_INVALID_ELEMENT_SIZE)
                        *failed_index = retbuf[0];
                else if (rc == H_INVALID_ELEMENT_VALUE)
                        *failed_index = retbuf[0];

                break;
        }

        return rc;
}

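/*
 * Run the vCPU.  On success the trap that caused the exit is returned in
 * *trap; on an H_INVALID_ELEMENT_* failure the offending element index is
 * returned in *failed_index.
 */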
static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
                                        unsigned long vcpu_id, int *trap,
                                        unsigned long *failed_index)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, flags, guest_id, vcpu_id);
        if (rc == H_SUCCESS)
                *trap = retbuf[0];
        else if (rc == H_INVALID_ELEMENT_ID)
                *failed_index = retbuf[0];
        else if (rc == H_INVALID_ELEMENT_SIZE)
                *failed_index = retbuf[0];
        else if (rc == H_INVALID_ELEMENT_VALUE)
                *failed_index = retbuf[0];

        return rc;
}

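/* Delete the guest, retrying while the hypervisor reports busy. */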
static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
{
        long rc;

        do {
                rc = plpar_hcall_norets(H_GUEST_DELETE, flags, guest_id);
                if (rc == H_BUSY)
                        cond_resched();

                if (H_IS_LONG_BUSY(rc)) {
                        msleep(get_longbusy_msecs(rc));
                        rc = H_BUSY;
                }

        } while (rc == H_BUSY);

        return rc;
}

static inline long plpar_guest_set_capabilities(unsigned long flags,
                                                unsigned long capabilities)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        do {
                rc = plpar_hcall(H_GUEST_SET_CAPABILITIES, retbuf, flags, capabilities);
                if (rc == H_BUSY)
                        cond_resched();

                if (H_IS_LONG_BUSY(rc)) {
                        msleep(get_longbusy_msecs(rc));
                        rc = H_BUSY;
                }
        } while (rc == H_BUSY);

        return rc;
}

static inline long plpar_guest_get_capabilities(unsigned long flags,
                                                unsigned long *capabilities)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        do {
                rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, flags);
                if (rc == H_BUSY)
                        cond_resched();

                if (H_IS_LONG_BUSY(rc)) {
                        msleep(get_longbusy_msecs(rc));
                        rc = H_BUSY;
                }
        } while (rc == H_BUSY);

        if (rc == H_SUCCESS)
                *capabilities = retbuf[0];

        return rc;
}

/*
 * Wrapper around the H_RPT_INVALIDATE hcall that handles the return values
 * appropriately:
 *
 * - Returns H_SUCCESS on success
 * - On H_BUSY, the hcall is retried.
 * - For any other hcall failure, attempt a full flush once before
 *   resorting to BUG().
 *
 * Note: This hcall is expected to fail only very rarely. Proper error
 * recovery (killing the affected process/guest) will eventually be
 * needed.
 */
static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
                                          u64 page_sizes, u64 start, u64 end)
{
        long rc;
        unsigned long all;

        while (true) {
                rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
                                        page_sizes, start, end);
                if (rc == H_BUSY) {
                        cpu_relax();
                        continue;
                } else if (rc == H_SUCCESS)
                        return rc;

                /* Flush request failed, try with a full flush once */
                if (type & H_RPTI_TYPE_NESTED)
                        all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;
                else
                        all = H_RPTI_TYPE_ALL;
retry:
                rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target,
                                        all, page_sizes, 0, -1UL);
                if (rc == H_BUSY) {
                        cpu_relax();
                        goto retry;
                } else if (rc == H_SUCCESS)
                        return rc;

                BUG();
        }
}

#else /* !CONFIG_PPC_PSERIES */

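/* Stubs for !CONFIG_PPC_PSERIES builds; each simply returns 0. */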
static inline long plpar_set_ciabr(unsigned long ciabr)
{
        return 0;
}

static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
                                    unsigned long *ptes)
{
        return 0;
}

static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
                                          u64 page_sizes, u64 start, u64 end)
{
        return 0;
}

static inline long plpar_guest_create_vcpu(unsigned long flags,
                                           unsigned long guest_id,
                                           unsigned long vcpu_id)
{
        return 0;
}

static inline long plpar_guest_get_state(unsigned long flags,
                                         unsigned long guest_id,
                                         unsigned long vcpu_id,
                                         unsigned long data_buffer,
                                         unsigned long data_size,
                                         unsigned long *failed_index)
{
        return 0;
}

static inline long plpar_guest_set_state(unsigned long flags,
                                         unsigned long guest_id,
                                         unsigned long vcpu_id,
                                         unsigned long data_buffer,
                                         unsigned long data_size,
                                         unsigned long *failed_index)
{
        return 0;
}

static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
                                        unsigned long vcpu_id, int *trap,
                                        unsigned long *failed_index)
{
        return 0;
}

static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
{
        return 0;
}

static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
{
        return 0;
}

static inline long plpar_guest_get_capabilities(unsigned long flags,
                                                unsigned long *capabilities)
{
        return 0;
}

static inline long plpar_guest_set_capabilities(unsigned long flags,
                                                unsigned long capabilities)
{
        return 0;
}

#endif /* CONFIG_PPC_PSERIES */

#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */