/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12 byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12 byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (nr of bits).                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
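
/*
 * Rough sketch of how these routines are driven from the C side of the
 * emulator.  This is illustrative only: the Xsig layout and the prototypes
 * are assumed to match the emulator's headers (poly.h / fpu_emu.h), and the
 * variable names are invented for the example.
 *
 *      typedef struct {
 *              unsigned long lsw;      // least significant 32 bits
 *              unsigned long midw;
 *              unsigned long msw;      // most significant 32 bits
 *      } Xsig;
 *
 *      Xsig accum;                     // 96 bit significand, filled in elsewhere
 *      int shift;
 *
 *      shift = norm_Xsig(&accum);      // shift left until bit 31 of msw is set
 *      shift = round_Xsig(&accum);     // normalize, then round to the top 64 bits
 *
 * The value returned in %eax is the count kept at -4(%ebp) below: it is
 * decremented once for every bit the significand is shifted left.
 */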
        .file   "round_Xsig.S"

#include "fpu_emu.h"


.text
ENTRY(round_Xsig)
        pushl   %ebp
        movl    %esp,%ebp
        pushl   %ebx            /* Reserve some space */
        pushl   %ebx
        pushl   %esi

        movl    PARAM1,%esi

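        /* Fetch the 96 bit operand into registers:
           %edx:%ebx:%eax = most:middle:least significant 32 bits. */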
        movl    8(%esi),%edx
        movl    4(%esi),%ebx
        movl    (%esi),%eax

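        /* -4(%ebp) holds the count returned in %eax: it is decremented once
           per bit shifted left, and bumped back up if rounding overflows. */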
        movl    $0,-4(%ebp)

        orl     %edx,%edx       /* ms bits */
        js      L_round         /* Already normalized */
        jnz     L_shift_1       /* Shift left 1 - 31 bits */

        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        movl    $-32,-4(%ebp)
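        /* The most significant word was zero: the operand has been moved up
           by one whole word; fall through to finish the shift. */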

/* We need to shift left by 1 - 31 bits */
L_shift_1:
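        /* %ecx = 31 - (index of the highest set bit in %edx), i.e. the left
           shift needed to set bit 31 of %edx; the shld/shl sequence applies
           that shift across all 96 bits. */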
        bsrl    %edx,%ecx       /* get the required shift in %ecx */
        subl    $31,%ecx
        negl    %ecx
        subl    %ecx,-4(%ebp)
        shld    %cl,%ebx,%edx
        shld    %cl,%eax,%ebx
        shl     %cl,%eax

L_round:
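        /* If bit 31 of the discarded low word is set, round the upper
           64 bits (%edx:%ebx) up by one. */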
        testl   $0x80000000,%eax
        jz      L_exit

        addl    $1,%ebx
        adcl    $0,%edx
        jnz     L_exit

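        /* The round-up carried out of %edx:%ebx (it wrapped to zero):
           restart with 0x80000000:00000000 and add one back to the count
           in -4(%ebp) to compensate. */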
        movl    $0x80000000,%edx
        incl    -4(%ebp)

L_exit:
        movl    %edx,8(%esi)
        movl    %ebx,4(%esi)
        movl    %eax,(%esi)

        movl    -4(%ebp),%eax

        popl    %esi
        popl    %ebx
        leave
        ret
ENDPROC(round_Xsig)




ENTRY(norm_Xsig)
        pushl   %ebp
        movl    %esp,%ebp
        pushl   %ebx            /* Reserve some space */
        pushl   %ebx
        pushl   %esi

        movl    PARAM1,%esi

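        /* Same register usage as round_Xsig: %edx:%ebx:%eax holds the
           operand, -4(%ebp) the shift count to return. */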
        movl    8(%esi),%edx
        movl    4(%esi),%ebx
        movl    (%esi),%eax

        movl    $0,-4(%ebp)

        orl     %edx,%edx       /* ms bits */
        js      L_n_exit        /* Already normalized */
        jnz     L_n_shift_1     /* Shift left 1 - 31 bits */

        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        movl    $-32,-4(%ebp)

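        /* One whole-word shift (32 bits) done; test the new most
           significant word and shift by another word if it is zero too. */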
        orl     %edx,%edx       /* ms bits */
        js      L_n_exit        /* Normalized now */
        jnz     L_n_shift_1     /* Shift left 1 - 31 bits */

        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        addl    $-32,-4(%ebp)
        jmp     L_n_exit        /* Might not be normalized,
                                   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
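        /* As in round_Xsig: shift left by 31 minus the index of the leading
           one bit, so that bit 31 of %edx becomes set. */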
        bsrl    %edx,%ecx       /* get the required shift in %ecx */
        subl    $31,%ecx
        negl    %ecx
        subl    %ecx,-4(%ebp)
        shld    %cl,%ebx,%edx
        shld    %cl,%eax,%ebx
        shl     %cl,%eax

L_n_exit:
        movl    %edx,8(%esi)
        movl    %ebx,4(%esi)
        movl    %eax,(%esi)

        movl    -4(%ebp),%eax

        popl    %esi
        popl    %ebx
        leave
        ret
ENDPROC(norm_Xsig)