	.text
	.align	16, 0x90
	.globl	__gmpn_addaddmul_1msb0
	.type	__gmpn_addaddmul_1msb0,@function

# mp_limb_t __gmpn_addaddmul_1msb0 (mp_ptr rp, mp_srcptr ap, mp_srcptr bp,
#                                   mp_size_t n, mp_limb_t u, mp_limb_t v)
#
# Computes rp[0..n-1] = ap[0..n-1]*u + bp[0..n-1]*v and returns the carry-out
# limb.  Both multipliers must have their most significant bit clear ("msb0"),
# so the two high product limbs plus the low-limb carries always fit in a
# single limb and no extra carry register is needed.
#
# SysV AMD64 arguments: rdi = rp, rsi = ap, rdx = bp (moved to rbp),
# rcx = n, r8 = u, r9 = v.

__gmpn_addaddmul_1msb0:
	push	%r12			# save callee-saved registers
	push	%rbp
	lea	(%rsi,%rcx,8), %rsi	# rsi = ap + n (point past the end)
	lea	(%rdx,%rcx,8), %rbp	# rbp = bp + n
	lea	(%rdi,%rcx,8), %rdi	# rdi = rp + n
	neg	%rcx			# rcx = -n; index counts up toward 0
	mov	(%rsi,%rcx,8), %rax	# rax = ap[0]
	mul	%r8			# ap[0] * u
	mov	%rax, %r12		# r12 = low limb of the running sum
	mov	(%rbp,%rcx,8), %rax	# rax = bp[0], for the pending bp[0]*v
	mov	%rdx, %r10		# r10 = high limb (carry into next limb)
	add	$3, %rcx		# loop below is unrolled three limbs deep
	jns	.Lend			# n <= 3: go straight to the tail
	.align	16, 0x90
# Main loop: three limbs per iteration.  Each pass finishes the pending
# bp[i]*v product, stores rp[i], rp[i+1], rp[i+2], and leaves the partial
# product for limb i+3 pending in r12:r10 with bp[i+3] preloaded in rax.
.Ltop:	mul	%r9			# bp[i] * v
	add	%rax, %r12
	mov	-16(%rsi,%rcx,8), %rax	# rax = ap[i+1]
	adc	%rdx, %r10
	mov	%r12, -24(%rdi,%rcx,8)	# store rp[i]
	mul	%r8			# ap[i+1] * u
	add	%rax, %r10
	mov	-16(%rbp,%rcx,8), %rax	# rax = bp[i+1]
	mov	$0, %r11d
	adc	%rdx, %r11
	mul	%r9			# bp[i+1] * v
	add	%rax, %r10
	mov	-8(%rsi,%rcx,8), %rax	# rax = ap[i+2]
	adc	%rdx, %r11
	mov	%r10, -16(%rdi,%rcx,8)	# store rp[i+1]
	mul	%r8			# ap[i+2] * u
	add	%rax, %r11
	mov	-8(%rbp,%rcx,8), %rax	# rax = bp[i+2]
	mov	$0, %r12d
	adc	%rdx, %r12
	mul	%r9			# bp[i+2] * v
	add	%rax, %r11
	adc	%rdx, %r12
	mov	(%rsi,%rcx,8), %rax	# rax = ap[i+3]
	mul	%r8			# ap[i+3] * u, left pending for next pass
	add	%rax, %r12
	mov	%r11, -8(%rdi,%rcx,8)	# store rp[i+2]
	mov	(%rbp,%rcx,8), %rax	# rax = bp[i+3]
	mov	$0, %r10d
	adc	%rdx, %r10
	add	$3, %rcx
	js	.Ltop
# Tail: rcx is now 0, 1 or 2 (determined by n mod 3).  Finish the pending
# product and the remaining limb pairs, then return the carry-out in rax.
.Lend:	cmp	$1, %ecx
	ja	2f			# rcx = 2: only the pending limb remains
	jz	1f			# rcx = 1: one more limb pair after it
# rcx = 0: two more limb pairs after the pending one.
	mul	%r9			# bp[n-3] * v
	add	%rax, %r12
	mov	-16(%rsi), %rax		# rax = ap[n-2]
	adc	%rdx, %r10
	mov	%r12, -24(%rdi)		# store rp[n-3]
	mul	%r8			# ap[n-2] * u
	add	%rax, %r10
	mov	-16(%rbp), %rax		# rax = bp[n-2]
	mov	$0, %r11d
	adc	%rdx, %r11
	mul	%r9			# bp[n-2] * v
	add	%rax, %r10
	mov	-8(%rsi), %rax		# rax = ap[n-1]
	adc	%rdx, %r11
	mov	%r10, -16(%rdi)		# store rp[n-2]
	mul	%r8			# ap[n-1] * u
	add	%rax, %r11
	mov	-8(%rbp), %rax		# rax = bp[n-1]
	mov	$0, %r12d
	adc	%rdx, %r12
	mul	%r9			# bp[n-1] * v
	add	%rax, %r11
	adc	%rdx, %r12
	mov	%r11, -8(%rdi)		# store rp[n-1]
	mov	%r12, %rax		# return the carry-out limb
	pop	%rbp
	pop	%r12
	ret
# rcx = 1: the pending limb plus one more limb pair.
1:	mul	%r9			# bp[n-2] * v
	add	%rax, %r12
	mov	-8(%rsi), %rax		# rax = ap[n-1]
	adc	%rdx, %r10
	mov	%r12, -16(%rdi)		# store rp[n-2]
	mul	%r8			# ap[n-1] * u
	add	%rax, %r10
	mov	-8(%rbp), %rax		# rax = bp[n-1]
	mov	$0, %r11d
	adc	%rdx, %r11
	mul	%r9			# bp[n-1] * v
	add	%rax, %r10
	adc	%rdx, %r11
	mov	%r10, -8(%rdi)		# store rp[n-1]
	mov	%r11, %rax		# return the carry-out limb
	pop	%rbp
	pop	%r12
	ret
# rcx = 2: only the pending limb remains.
2:	mul	%r9			# bp[n-1] * v
	add	%rax, %r12
	mov	%r12, -8(%rdi)		# store rp[n-1]
	adc	%rdx, %r10
	mov	%r10, %rax		# return the carry-out limb
	pop	%rbp
	pop	%r12
	ret
.size __gmpn_addaddmul_1msb0,.-__gmpn_addaddmul_1msb0
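
# A minimal C reference sketch of what the routine above appears to compute,
# kept inside a comment so the file still assembles.  It assumes 64-bit limbs
# and that both multipliers have their top bit clear; the name
# addaddmul_1msb0_ref, the limb_t typedef and the use of unsigned __int128
# are illustrative assumptions for this sketch, not GMP's own code.
#
#   typedef unsigned long long limb_t;
#
#   limb_t
#   addaddmul_1msb0_ref (limb_t *rp, const limb_t *ap, const limb_t *bp,
#                        long n, limb_t u, limb_t v)
#   {
#     limb_t carry = 0;
#     for (long i = 0; i < n; i++)
#       {
#         /* msb0 on u and v guarantees this 128-bit sum cannot overflow */
#         unsigned __int128 t = (unsigned __int128) ap[i] * u
#                             + (unsigned __int128) bp[i] * v
#                             + carry;
#         rp[i] = (limb_t) t;          /* low limb becomes the result limb */
#         carry = (limb_t) (t >> 64);  /* high limb carries into the next  */
#       }
#     return carry;                    /* carry-out limb, as in %rax above */
#   }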