/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
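
/*
 * Illustrative sketch (not built; assumes nothing beyond the comment
 * above): the classic real-mode stack switch relies on the
 * one-instruction interrupt shadow after "mov ss,xx":
 *
 *         mov  ss, ax        ; real hardware holds interrupts off...
 *         mov  sp, bx        ; ...until after this instruction
 *
 * Under v86 emulation the window between the two may be widened by a
 * page fault, so an interrupt delivered in between would see a
 * mismatched ss:sp pair.  The safe idiom needing no shadow is:
 *
 *         lss  sp, [stack_ptr]   ; loads ss and sp in one instruction
 */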


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)        (((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)        (((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)        (*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)        (*(unsigned short *)&((regs)->pt.sp))
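
/*
 * Example (illustrative): IP(regs) and SP(regs) are the 16-bit views of
 * eip/esp that real-mode code actually sees, and AH(regs) is the DOS
 * function number of an "int 0x21" call, which is how do_int() below
 * decides whether a particular int 0x21 has been revectored.
 */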

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS  (*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS (current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))
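
/*
 * Worked example (illustrative): set_flags() copies only the bits
 * selected by 'mask' from 'new' into 'X', leaving the rest of 'X'
 * untouched.  E.g. with X = 0x0202 (IF set), new = 0x08D5 and
 * mask = SAFE_MASK (0xDD5):
 *
 *         (0x0202 & ~0xDD5) | (0x08D5 & 0xDD5) == 0x0202 | 0x08D5 == 0x0AD7
 *
 * i.e. the arithmetic flags come from 'new' while IF stays as it was.
 */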

#define SAFE_MASK       (0xDD5)
#define RETURN_MASK     (0xDFF)
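
/*
 * Decoding these masks (illustrative): SAFE_MASK (0xDD5) is
 * CF|PF|AF|ZF|SF|TF|DF|OF, i.e. the arithmetic/status flags plus TF
 * and DF.  RETURN_MASK (0xDFF) additionally covers the reserved low
 * EFLAGS bits (1, 3 and 5).  Neither includes IF or IOPL, which the
 * vm86 guest must never control directly.
 */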

void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
        struct tss_struct *tss;
        struct task_struct *tsk = current;
        struct vm86plus_struct __user *user;
        struct vm86 *vm86 = current->thread.vm86;
        long err = 0;

        /*
         * This gets called from entry.S with interrupts disabled, but
         * from process context. Enable interrupts here, before trying
         * to access user space.
         */
        local_irq_enable();

        if (!vm86 || !vm86->user_vm86) {
                pr_alert("no user_vm86: BAD\n");
                do_exit(SIGSEGV);
        }
        set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
        user = vm86->user_vm86;

        if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
                       sizeof(struct vm86plus_struct) :
                       sizeof(struct vm86_struct))) {
                pr_alert("could not access userspace vm86 info\n");
                do_exit(SIGSEGV);
        }

        put_user_try {
                put_user_ex(regs->pt.bx, &user->regs.ebx);
                put_user_ex(regs->pt.cx, &user->regs.ecx);
                put_user_ex(regs->pt.dx, &user->regs.edx);
                put_user_ex(regs->pt.si, &user->regs.esi);
                put_user_ex(regs->pt.di, &user->regs.edi);
                put_user_ex(regs->pt.bp, &user->regs.ebp);
                put_user_ex(regs->pt.ax, &user->regs.eax);
                put_user_ex(regs->pt.ip, &user->regs.eip);
                put_user_ex(regs->pt.cs, &user->regs.cs);
                put_user_ex(regs->pt.flags, &user->regs.eflags);
                put_user_ex(regs->pt.sp, &user->regs.esp);
                put_user_ex(regs->pt.ss, &user->regs.ss);
                put_user_ex(regs->es, &user->regs.es);
                put_user_ex(regs->ds, &user->regs.ds);
                put_user_ex(regs->fs, &user->regs.fs);
                put_user_ex(regs->gs, &user->regs.gs);

                put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
        } put_user_catch(err);
        if (err) {
                pr_alert("could not access userspace vm86 info\n");
                do_exit(SIGSEGV);
        }

        tss = &per_cpu(cpu_tss, get_cpu());
        tsk->thread.sp0 = vm86->saved_sp0;
        tsk->thread.sysenter_cs = __KERNEL_CS;
        load_sp0(tss, &tsk->thread);
        vm86->saved_sp0 = 0;
        put_cpu();

        memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

        lazy_load_gs(vm86->regs32.gs);

        regs->pt.ax = retval;
}

static void mark_screen_rdonly(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i;

        down_write(&mm->mmap_sem);
        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
        pud = pud_offset(pgd, 0xA0000);
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, 0xA0000);

        if (pmd_trans_huge(*pmd)) {
                vma = find_vma(mm, 0xA0000);
                split_huge_pmd(vma, pmd, 0xA0000);
        }
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
        for (i = 0; i < 32; i++) {
                if (pte_present(*pte))
                        set_pte(pte, pte_wrprotect(*pte));
                pte++;
        }
        pte_unmap_unlock(pte, ptl);
out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
        return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}


SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
        switch (cmd) {
        case VM86_REQUEST_IRQ:
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
                return do_vm86_irq_handling(cmd, (int)arg);
        case VM86_PLUS_INSTALL_CHECK:
                /*
                 * NOTE: on old vm86 stuff this will return the error
                 *  from access_ok(), because the subfunction is
                 *  interpreted as (invalid) address to vm86_struct.
                 *  So the installation check works.
                 */
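                /*
                 * Hypothetical userspace probe (illustrative, not part
                 * of this file): a vm86plus-aware program can detect
                 * support with something like
                 *
                 *         if (vm86(VM86_PLUS_INSTALL_CHECK, 0) == 0)
                 *                 vm86plus_available = 1;
                 *
                 * where vm86() is assumed to wrap the raw system call.
                 */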
                return 0;
        }

        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
        return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}


static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
        struct tss_struct *tss;
        struct task_struct *tsk = current;
        struct vm86 *vm86 = tsk->thread.vm86;
        struct kernel_vm86_regs vm86regs;
        struct pt_regs *regs = current_pt_regs();
        unsigned long err = 0;

        err = security_mmap_addr(0);
        if (err) {
                /*
                 * vm86 cannot virtualize the address space, so vm86 users
                 * need to manage the low 1MB themselves using mmap.  Given
                 * that BIOS places important data in the first page, vm86
                 * is essentially useless if mmap_min_addr != 0.  DOSEMU,
                 * for example, won't even bother trying to use vm86 if it
                 * can't map a page at virtual address 0.
                 *
                 * To reduce the available kernel attack surface, simply
                 * disallow vm86(old) for users who cannot mmap at va 0.
                 *
                 * The implementation of security_mmap_addr will allow
                 * suitably privileged users to map va 0 even if
                 * vm.mmap_min_addr is set above 0, and we want this
                 * behavior for vm86 as well, as it ensures that legacy
                 * tools like vbetool will not fail just because of
                 * vm.mmap_min_addr.
                 */
                pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d).  Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
                             current->comm, task_pid_nr(current),
                             from_kuid_munged(&init_user_ns, current_uid()));
                return -EPERM;
        }

        if (!vm86) {
                if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
                        return -ENOMEM;
                tsk->thread.vm86 = vm86;
        }
        if (vm86->saved_sp0)
                return -EPERM;

        if (!access_ok(VERIFY_READ, user_vm86, plus ?
                       sizeof(struct vm86plus_struct) :
                       sizeof(struct vm86_struct)))
                return -EFAULT;

        memset(&vm86regs, 0, sizeof(vm86regs));
        get_user_try {
                unsigned short seg;
                get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
                get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
                get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
                get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
                get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
                get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
                get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
                get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
                get_user_ex(seg, &user_vm86->regs.cs);
                vm86regs.pt.cs = seg;
                get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
                get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
                get_user_ex(seg, &user_vm86->regs.ss);
                vm86regs.pt.ss = seg;
                get_user_ex(vm86regs.es, &user_vm86->regs.es);
                get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
                get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
                get_user_ex(vm86regs.gs, &user_vm86->regs.gs);

                get_user_ex(vm86->flags, &user_vm86->flags);
                get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
                get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
        } get_user_catch(err);
        if (err)
                return err;

        if (copy_from_user(&vm86->int_revectored,
                           &user_vm86->int_revectored,
                           sizeof(struct revectored_struct)))
                return -EFAULT;
        if (copy_from_user(&vm86->int21_revectored,
                           &user_vm86->int21_revectored,
                           sizeof(struct revectored_struct)))
                return -EFAULT;
        if (plus) {
                if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
                                   sizeof(struct vm86plus_info_struct)))
                        return -EFAULT;
                vm86->vm86plus.is_vm86pus = 1;
        } else
                memset(&vm86->vm86plus, 0,
                       sizeof(struct vm86plus_info_struct));

        memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
        vm86->user_vm86 = user_vm86;

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
        VEFLAGS = vm86regs.pt.flags;
        vm86regs.pt.flags &= SAFE_MASK;
        vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
        vm86regs.pt.flags |= X86_VM_MASK;
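        /*
         * Worked example (illustrative): if userspace passes
         * eflags = 0x3202 (IOPL=3, IF=1) while the kernel-side view is
         * regs->flags = 0x0246, the three lines above compute
         *
         *         0x3202 & SAFE_MASK  == 0x0000
         *         0x0246 & ~SAFE_MASK == 0x0202  (IF kept from the kernel)
         *
         * then OR in X86_VM_MASK; IF and IOPL come from the kernel-side
         * flags (IOPL is 0 here), so the guest cannot smuggle them in.
         */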

        vm86regs.pt.orig_ax = regs->orig_ax;

        switch (vm86->cpu_type) {
        case CPU_286:
                vm86->veflags_mask = 0;
                break;
        case CPU_386:
                vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        case CPU_486:
                vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        default:
                vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        }

/*
 * Save old state
 */
        vm86->saved_sp0 = tsk->thread.sp0;
        lazy_save_gs(vm86->regs32.gs);

        tss = &per_cpu(cpu_tss, get_cpu());
        /* make room for real-mode segments */
        tsk->thread.sp0 += 16;

        if (static_cpu_has(X86_FEATURE_SEP))
                tsk->thread.sysenter_cs = 0;

        load_sp0(tss, &tsk->thread);
        put_cpu();

        if (vm86->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);

        memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
        force_iret();
        return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
        VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
        VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
        regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
        regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */
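
/*
 * Illustrative trace of that sequence through handle_vm86_fault():
 * CLI clears VIF; PUSHF pushes get_vflags() with IF clear; STI sets
 * VIF again; POPF then reaches set_vflags_short() with IF clear in
 * the popped value, so the clear_IF() branch below must run for
 * interrupts to end up disabled, as a real CPU would leave them.
 */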

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
        set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
        set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
        unsigned long flags = regs->pt.flags & RETURN_MASK;

        if (VEFLAGS & X86_EFLAGS_VIF)
                flags |= X86_EFLAGS_IF;
        flags |= X86_EFLAGS_IOPL;
        return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
        return test_bit(nr, bitmap->__map);
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
        do { \
                __u8 __val = val; \
                ptr--; \
                if (put_user(__val, base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define pushw(base, ptr, val, err_label) \
        do { \
                __u16 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define pushl(base, ptr, val, err_label) \
        do { \
                __u32 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define popb(base, ptr, err_label) \
        ({ \
                __u8 __res; \
                if (get_user(__res, base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popw(base, ptr, err_label) \
        ({ \
                __u16 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popl(base, ptr, err_label) \
        ({ \
                __u32 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })
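
/*
 * Illustrative: pushw stores the high byte first as its 'ptr' argument
 * walks downward, so the value ends up little-endian in guest memory,
 * exactly as a real-mode PUSH would leave it.  E.g.
 * pushw(ssp, sp, 0x1234, oops) writes 0x12 at ssp+sp-1 and 0x34 at
 * ssp+sp-2, decrementing the local 'sp' by 2; the caller still adjusts
 * SP(regs) itself, as do_int() below does with "SP(regs) -= 6".
 */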

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user *ssp, unsigned short sp)
{
        unsigned long __user *intr_ptr;
        unsigned long segoffs;
        struct vm86 *vm86 = current->thread.vm86;

        if (regs->pt.cs == BIOSSEG)
                goto cannot_handle;
        if (is_revectored(i, &vm86->int_revectored))
                goto cannot_handle;
        if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
                goto cannot_handle;
        intr_ptr = (unsigned long __user *) (i << 2);
        if (get_user(segoffs, intr_ptr))
                goto cannot_handle;
        if ((segoffs >> 16) == BIOSSEG)
                goto cannot_handle;
        pushw(ssp, sp, get_vflags(regs), cannot_handle);
        pushw(ssp, sp, regs->pt.cs, cannot_handle);
        pushw(ssp, sp, IP(regs), cannot_handle);
        regs->pt.cs = segoffs >> 16;
        SP(regs) -= 6;
        IP(regs) = segoffs & 0xffff;
        clear_TF(regs);
        clear_IF(regs);
        clear_AC(regs);
        return;

cannot_handle:
        save_v86_state(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
        struct vm86 *vm86 = current->thread.vm86;

        if (vm86->vm86plus.is_vm86pus) {
                if ((trapno == 3) || (trapno == 1)) {
                        save_v86_state(regs, VM86_TRAP + (trapno << 8));
                        return 0;
                }
                do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
                return 0;
        }
        if (trapno != 1)
                return 1; /* we let this be handled by the calling routine */
        current->thread.trap_nr = trapno;
        current->thread.error_code = error_code;
        force_sig(SIGTRAP, current);
        return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
        unsigned char opcode;
        unsigned char __user *csp;
        unsigned char __user *ssp;
        unsigned short ip, sp, orig_flags;
        int data32, pref_done;
        struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
        if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
                newflags |= X86_EFLAGS_TF

        orig_flags = *(unsigned short *)&regs->pt.flags;

        csp = (unsigned char __user *) (regs->pt.cs << 4);
        ssp = (unsigned char __user *) (regs->pt.ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        data32 = 0;
        pref_done = 0;
        do {
                switch (opcode = popb(csp, ip, simulate_sigsegv)) {
                case 0x66:      /* 32-bit data */     data32 = 1; break;
                case 0x67:      /* 32-bit address */  break;
                case 0x2e:      /* CS */              break;
                case 0x3e:      /* DS */              break;
                case 0x26:      /* ES */              break;
                case 0x36:      /* SS */              break;
                case 0x65:      /* GS */              break;
                case 0x64:      /* FS */              break;
                case 0xf2:      /* repnz */           break;
                case 0xf3:      /* rep */             break;
                default: pref_done = 1;
                }
        } while (!pref_done);

        switch (opcode) {

        /* pushf */
        case 0x9c:
                if (data32) {
                        pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 4;
                } else {
                        pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 2;
                }
                IP(regs) = ip;
                goto vm86_fault_return;

        /* popf */
        case 0x9d:
                {
                unsigned long newflags;
                if (data32) {
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 4;
                } else {
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 2;
                }
                IP(regs) = ip;
                CHECK_IF_IN_TRAP;
                if (data32)
                        set_vflags_long(newflags, regs);
                else
                        set_vflags_short(newflags, regs);

                goto check_vip;
                }

        /* int xx */
        case 0xcd: {
                int intno = popb(csp, ip, simulate_sigsegv);
                IP(regs) = ip;
                if (vmpi->vm86dbg_active) {
                        if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
                                save_v86_state(regs, VM86_INTx + (intno << 8));
                                return;
                        }
                }
                do_int(regs, intno, ssp, sp);
                return;
        }

        /* iret */
        case 0xcf:
                {
                unsigned long newip;
                unsigned long newcs;
                unsigned long newflags;
                if (data32) {
                        newip = popl(ssp, sp, simulate_sigsegv);
                        newcs = popl(ssp, sp, simulate_sigsegv);
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 12;
                } else {
                        newip = popw(ssp, sp, simulate_sigsegv);
                        newcs = popw(ssp, sp, simulate_sigsegv);
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 6;
                }
                IP(regs) = newip;
                regs->pt.cs = newcs;
                CHECK_IF_IN_TRAP;
                if (data32) {
                        set_vflags_long(newflags, regs);
                } else {
                        set_vflags_short(newflags, regs);
                }
                goto check_vip;
                }

        /* cli */
        case 0xfa:
                IP(regs) = ip;
                clear_IF(regs);
                goto vm86_fault_return;

        /* sti */
        /*
         * Damn. This is incorrect: the 'sti' instruction should actually
         * enable interrupts after the /next/ instruction. Not good.
         *
         * Probably needs some horsing around with the TF flag. Aiee..
         */
        case 0xfb:
                IP(regs) = ip;
                set_IF(regs);
                goto check_vip;

        default:
                save_v86_state(regs, VM86_UNKNOWN);
        }

        return;

check_vip:
        if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
            (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
                save_v86_state(regs, VM86_STI);
                return;
        }

vm86_fault_return:
        if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
                save_v86_state(regs, VM86_PICRETURN);
                return;
        }
        if (orig_flags & X86_EFLAGS_TF)
                handle_vm86_trap(regs, 0, X86_TRAP_DB);
        return;

simulate_sigsegv:
        /* FIXME: After a long discussion with Stas we finally
         *        agreed that this is wrong. Here we should
         *        really send a SIGSEGV to the user program.
         *        But how do we create the correct context? We
         *        are inside a general protection fault handler
         *        and have just returned from a page fault handler.
         *        The correct context for the signal handler
         *        should be a mixture of the two, but how do we
         *        get the information? [KD]
         */
        save_v86_state(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME            "vm86irq"

static struct vm86_irqs {
        struct task_struct *tsk;
        int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
        | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
        | (1 << SIGUNUSED))
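
/*
 * Hypothetical usage sketch (illustrative): userspace packs the signal
 * number and IRQ line into one argument, matching the decoding in
 * do_vm86_irq_handling() below:
 *
 *         vm86(VM86_REQUEST_IRQ, (SIGUSR1 << 8) | irq);
 *
 * A signal number of 0 requests no signal at all, which is why bit 0
 * of ALLOWED_SIGS is set.
 */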

static irqreturn_t irq_handler(int intno, void *dev_id)
{
        int irq_bit;
        unsigned long flags;

        spin_lock_irqsave(&irqbits_lock, flags);
        irq_bit = 1 << intno;
        if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
                goto out;
        irqbits |= irq_bit;
        if (vm86_irqs[intno].sig)
                send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
        /*
         * IRQ will be re-enabled when user asks for the irq (whether
         * polling or as a result of the signal)
         */
        disable_irq_nosync(intno);
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_HANDLED;

out:
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
        unsigned long flags;

        free_irq(irqnumber, NULL);
        vm86_irqs[irqnumber].tsk = NULL;

        spin_lock_irqsave(&irqbits_lock, flags);
        irqbits &= ~(1 << irqnumber);
        spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
        int i;
        for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
                if (vm86_irqs[i].tsk == task)
                        free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
        int bit;
        unsigned long flags;
        int ret = 0;

        if (invalid_vm86_irq(irqnumber)) return 0;
        if (vm86_irqs[irqnumber].tsk != current) return 0;
        spin_lock_irqsave(&irqbits_lock, flags);
        bit = irqbits & (1 << irqnumber);
        irqbits &= ~bit;
        if (bit) {
                enable_irq(irqnumber);
                ret = 1;
        }

        spin_unlock_irqrestore(&irqbits_lock, flags);
        return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
        int ret;
        switch (subfunction) {
                case VM86_GET_AND_RESET_IRQ: {
                        return get_and_reset_irq(irqnumber);
                }
                case VM86_GET_IRQ_BITS: {
                        return irqbits;
                }
                case VM86_REQUEST_IRQ: {
                        int sig = irqnumber >> 8;
                        int irq = irqnumber & 255;
                        if (!capable(CAP_SYS_ADMIN)) return -EPERM;
                        if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
                        if (invalid_vm86_irq(irq)) return -EPERM;
                        if (vm86_irqs[irq].tsk) return -EPERM;
                        ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
                        if (ret) return ret;
                        vm86_irqs[irq].sig = sig;
                        vm86_irqs[irq].tsk = current;
                        return irq;
                }
                case VM86_FREE_IRQ: {
                        if (invalid_vm86_irq(irqnumber)) return -EPERM;
                        if (!vm86_irqs[irqnumber].tsk) return 0;
                        if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
                        free_vm86_irq(irqnumber);
                        return 0;
                }
        }
        return -EINVAL;
}
866