.text
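# mpn_mullo_basecase(rp=%rdi, up=%rsi, vp=%rdx, n=%rcx)
# Computes the n least significant limbs of {up,n} * {vp,n} and stores
# them at rp (the low half of the full 2n-limb product).  This appears
# to be the assembler output of GMP's mullo_basecase for CPUs with
# MULX/ADX.  The .byte sequences throughout hand-encode BMI2/ADX
# instructions (mulx, adcx, adox) for assemblers that lack those
# mnemonics; each is annotated with its decoded form.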
.align 5, 0x90
.globl ___gmpn_mullo_basecase
___gmpn_mullo_basecase:
cmp $4, %ecx
jae Lbig
mov %rdx, %r11			# vp
mov (%rsi), %rdx		# rdx = up[0]
cmp $2, %ecx
jae Lgt1
Ln1: imul (%r11), %rdx		# n = 1: single low product
mov %rdx, (%rdi)
ret
Lgt1: ja Lgt2
Ln2: mov (%r11), %r9		# v0
.byte 0xc4,194,251,0xf6,209	# mulx %r9, %rax, %rdx (u0*v0)
mov %rax, (%rdi)
mov 8(%rsi), %rax
imul %r9, %rax			# low(u1*v0)
add %rax, %rdx
mov 8(%r11), %r9		# v1
mov (%rsi), %rcx
imul %r9, %rcx			# low(u0*v1)
add %rcx, %rdx
mov %rdx, 8(%rdi)
ret
Lgt2:
Ln3: mov (%r11), %r9		# v0
.byte 0xc4,66,251,0xf6,209	# mulx %r9, %rax, %r10 (u0*v0)
mov %rax, (%rdi)
mov 8(%rsi), %rdx
.byte 0xc4,194,251,0xf6,209	# mulx %r9, %rax, %rdx (u1*v0)
imul 16(%rsi), %r9		# low(u2*v0)
add %rax, %r10
adc %rdx, %r9
mov 8(%r11), %r8		# v1
mov (%rsi), %rdx
.byte 0xc4,194,251,0xf6,208	# mulx %r8, %rax, %rdx (u0*v1)
add %rax, %r10
adc %rdx, %r9
imul 8(%rsi), %r8		# low(u1*v1)
add %r8, %r9
mov %r10, 8(%rdi)
mov 16(%r11), %r10		# v2
mov (%rsi), %rax
imul %rax, %r10			# low(u0*v2)
add %r10, %r9
mov %r9, 16(%rdi)
ret
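# Generic case, n >= 4.  The one product that reaches the top limb
# from the last vp limb, up[0]*vp[n-1], is accumulated separately in
# %r14.  The rest is built from one mul_1 pass (Lmf*/Lmtop) followed
# by addmul_1-style passes (Lf*/Ltop), each one limb shorter, all
# dispatched through a jump table on the limb count mod 8.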
.align 4, 0x90
Lbig: push %r14
push %r12
push %rbx
push %rbp
mov -8(%rdx,%rcx,8), %r14	# vp[n-1]
imul (%rsi), %r14		# r14 = low(up[0]*vp[n-1])
lea -3(%rcx), %ebp		# n - 3, controls the addmul passes
lea 8(%rdx), %r11		# vp + 1
mov (%rdx), %rdx		# rdx = vp[0]
mov %ecx, %eax
shr $3, %ecx			# n / 8
and $7, %eax			# n mod 8; also clears OF and CF
lea Lmtab(%rip), %r10
movslq (%r10,%rax,4), %rax	# PIC dispatch: load table offset,
lea (%rax, %r10), %r10		# add the table base,
jmp *%r10			# and jump to the selected entry
Lmf0: .byte 0xc4,98,171,0xf6,6	# mulx (%rsi), %r10, %r8
lea 56(%rsi), %rsi
lea -8(%rdi), %rdi
lea Lf7(%rip), %rbx
jmp Lmb0
Lmf3: .byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
lea 16(%rsi), %rsi
lea 16(%rdi), %rdi
jrcxz Lmc			# rcx = 0 (n = 4): take the corner shortcut
inc %ecx
lea Lf2(%rip), %rbx
jmp Lmb3
Lmc: .byte 0xc4,98,171,0xf6,70,248	# mulx -8(%rsi), %r10, %r8
add %rax, %r10
mov %r9, -16(%rdi)
.byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
mov %r10, -8(%rdi)
adc %r8, %r9
mov %r9, (%rdi)
jmp Lc2
Lmf4: .byte 0xc4,98,171,0xf6,6	# mulx (%rsi), %r10, %r8
lea 24(%rsi), %rsi
lea 24(%rdi), %rdi
inc %ecx
lea Lf3(%rip), %rbx
jmp Lmb4
Lmf5: .byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
lea 32(%rsi), %rsi
lea 32(%rdi), %rdi
inc %ecx
lea Lf4(%rip), %rbx
jmp Lmb5
Lmf6: .byte 0xc4,98,171,0xf6,6	# mulx (%rsi), %r10, %r8
lea 40(%rsi), %rsi
lea 40(%rdi), %rdi
inc %ecx
lea Lf5(%rip), %rbx
jmp Lmb6
Lmf7: .byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
lea 48(%rsi), %rsi
lea 48(%rdi), %rdi
lea Lf6(%rip), %rbx
jmp Lmb7
Lmf1: .byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
lea Lf0(%rip), %rbx
jmp Lmb1
Lmf2: .byte 0xc4,98,171,0xf6,6	# mulx (%rsi), %r10, %r8
lea 8(%rsi), %rsi
lea 8(%rdi), %rdi
lea Lf1(%rip), %rbx
.byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
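# First pass: rp[] = up[] * vp[0], an 8-way unrolled mulx chain.
# %r9/%r10 alternate as product-low registers; %rax/%r8 carry the highs.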
.align 5, 0x90
Lmtop:mov %r10, -8(%rdi)
adc %r8, %r9
Lmb1: .byte 0xc4,98,171,0xf6,70,8	# mulx 8(%rsi), %r10, %r8
adc %rax, %r10
lea 64(%rsi), %rsi
mov %r9, (%rdi)
Lmb0: mov %r10, 8(%rdi)
.byte 0xc4,226,179,0xf6,70,208	# mulx -48(%rsi), %r9, %rax
lea 64(%rdi), %rdi
adc %r8, %r9
Lmb7: .byte 0xc4,98,171,0xf6,70,216	# mulx -40(%rsi), %r10, %r8
mov %r9, -48(%rdi)
adc %rax, %r10
Lmb6: mov %r10, -40(%rdi)
.byte 0xc4,226,179,0xf6,70,224	# mulx -32(%rsi), %r9, %rax
adc %r8, %r9
Lmb5: .byte 0xc4,98,171,0xf6,70,232	# mulx -24(%rsi), %r10, %r8
mov %r9, -32(%rdi)
adc %rax, %r10
Lmb4: .byte 0xc4,226,179,0xf6,70,240	# mulx -16(%rsi), %r9, %rax
mov %r10, -24(%rdi)
adc %r8, %r9
Lmb3: .byte 0xc4,98,171,0xf6,70,248	# mulx -8(%rsi), %r10, %r8
adc %rax, %r10
mov %r9, -16(%rdi)
dec %ecx
.byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
jnz Lmtop
Lmend:mov %r10, -8(%rdi)
adc %r8, %r9
mov %r9, (%rdi)
adc %rcx, %rax			# rcx = 0 here: fold the final carry
lea 8(,%rbp,8), %r12		# 8*(count+1) ...
neg %r12			# ... negated: pointer rewind distance
shr $3, %ebp
jmp Lent
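# Feed-in points Lf0..Lf7 for the addmul passes.  Lent folds the
# previous pass's top products into %r14, rewinds the operand
# pointers, loads the next vp limb, and jumps through %rbx to the
# feed-in point of the next, one-limb-shorter pass.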
Lf0: .byte 0xc4,98,171,0xf6,6	# mulx (%rsi), %r10, %r8
lea -8(%rsi), %rsi
lea -8(%rdi), %rdi
lea Lf7(%rip), %rbx
jmp Lb0
Lf1: .byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
lea -1(%rbp), %ebp
lea Lf0(%rip), %rbx
jmp Lb1
Lend: .byte 0xf3,76,0x0f,0x38,0xf6,15	# adox (%rdi), %r9
mov %r9, (%rdi)
.byte 0xf3,72,0x0f,0x38,0xf6,193	# adox %rcx, %rax (rcx = 0)
adc %rcx, %rax
lea 8(%r12), %r12
Lent: .byte 0xc4,98,171,0xf6,70,8	# mulx 8(%rsi), %r10, %r8
add %rax, %r14			# fold this pass's top into r14
add %r10, %r14
lea (%rsi,%r12), %rsi		# rewind up
lea 8(%rdi,%r12), %rdi		# rewind rp, advanced one limb
mov (%r11), %rdx		# rdx = next vp limb
lea 8(%r11), %r11
or %ebp, %ecx			# ecx = ebp (rcx was 0); clears OF, CF
jmp *%rbx
Lf7: .byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
lea -16(%rsi), %rsi
lea -16(%rdi), %rdi
lea Lf6(%rip), %rbx
jmp Lb7
Lf2: .byte 0xc4,98,171,0xf6,6	# mulx (%rsi), %r10, %r8
lea 8(%rsi), %rsi
lea 8(%rdi), %rdi
.byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
lea Lf1(%rip), %rbx
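# Main addmul loop: two independent carry chains run in parallel,
# adox (OF chain) accumulating the existing rp[] limbs and adcx
# (CF chain) propagating the product-to-product carries.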
.align 5, 0x90
Ltop: .byte 0xf3,76,0x0f,0x38,0xf6,87,248	# adox -8(%rdi), %r10
.byte 0x66,77,0x0f,0x38,0xf6,200	# adcx %r8, %r9
mov %r10, -8(%rdi)
jrcxz Lend
Lb1: .byte 0xc4,98,171,0xf6,70,8	# mulx 8(%rsi), %r10, %r8
.byte 0xf3,76,0x0f,0x38,0xf6,15		# adox (%rdi), %r9
lea -1(%rcx), %ecx
mov %r9, (%rdi)
.byte 0x66,76,0x0f,0x38,0xf6,208	# adcx %rax, %r10
Lb0: .byte 0xc4,226,179,0xf6,70,16	# mulx 16(%rsi), %r9, %rax
.byte 0x66,77,0x0f,0x38,0xf6,200	# adcx %r8, %r9
.byte 0xf3,76,0x0f,0x38,0xf6,87,8	# adox 8(%rdi), %r10
mov %r10, 8(%rdi)
Lb7: .byte 0xc4,98,171,0xf6,70,24	# mulx 24(%rsi), %r10, %r8
lea 64(%rsi), %rsi
.byte 0x66,76,0x0f,0x38,0xf6,208	# adcx %rax, %r10
.byte 0xf3,76,0x0f,0x38,0xf6,79,16	# adox 16(%rdi), %r9
mov %r9, 16(%rdi)
Lb6: .byte 0xc4,226,179,0xf6,70,224	# mulx -32(%rsi), %r9, %rax
.byte 0xf3,76,0x0f,0x38,0xf6,87,24	# adox 24(%rdi), %r10
.byte 0x66,77,0x0f,0x38,0xf6,200	# adcx %r8, %r9
mov %r10, 24(%rdi)
Lb5: .byte 0xc4,98,171,0xf6,70,232	# mulx -24(%rsi), %r10, %r8
.byte 0x66,76,0x0f,0x38,0xf6,208	# adcx %rax, %r10
.byte 0xf3,76,0x0f,0x38,0xf6,79,32	# adox 32(%rdi), %r9
mov %r9, 32(%rdi)
Lb4: .byte 0xc4,226,179,0xf6,70,240	# mulx -16(%rsi), %r9, %rax
.byte 0xf3,76,0x0f,0x38,0xf6,87,40	# adox 40(%rdi), %r10
.byte 0x66,77,0x0f,0x38,0xf6,200	# adcx %r8, %r9
mov %r10, 40(%rdi)
Lb3: .byte 0xf3,76,0x0f,0x38,0xf6,79,48	# adox 48(%rdi), %r9
.byte 0xc4,98,171,0xf6,70,248	# mulx -8(%rsi), %r10, %r8
mov %r9, 48(%rdi)
lea 64(%rdi), %rdi
.byte 0x66,76,0x0f,0x38,0xf6,208	# adcx %rax, %r10
.byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
jmp Ltop
jmp Ltop
Lf6: .byte 0xc4,98,171,0xf6,6	# mulx (%rsi), %r10, %r8
lea 40(%rsi), %rsi
lea -24(%rdi), %rdi
lea Lf5(%rip), %rbx
jmp Lb6
Lf5: .byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
lea 32(%rsi), %rsi
lea -32(%rdi), %rdi
lea Lf4(%rip), %rbx
jmp Lb5
Lf4: .byte 0xc4,98,171,0xf6,6	# mulx (%rsi), %r10, %r8
lea 24(%rsi), %rsi
lea -40(%rdi), %rdi
lea Lf3(%rip), %rbx
jmp Lb4
Lf3: .byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
lea 16(%rsi), %rsi
lea -48(%rdi), %rdi
jrcxz Lcor			# pass count exhausted: wind down
lea Lf2(%rip), %rbx
jmp Lb3
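# Wind-down: finish the last partial pass, then compute the final two
# result limbs, folding the top contribution accumulated in %r14
# (seeded with up[0]*vp[n-1]) into the most significant stored limb.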
Lcor: .byte 0xf3,76,0x0f,0x38,0xf6,79,48	# adox 48(%rdi), %r9
.byte 0xc4,98,171,0xf6,70,248	# mulx -8(%rsi), %r10, %r8
mov %r9, 48(%rdi)
lea 64(%rdi), %rdi
.byte 0x66,76,0x0f,0x38,0xf6,208	# adcx %rax, %r10
.byte 0xc4,226,179,0xf6,6	# mulx (%rsi), %r9, %rax
.byte 0xf3,76,0x0f,0x38,0xf6,87,248	# adox -8(%rdi), %r10
.byte 0x66,77,0x0f,0x38,0xf6,200	# adcx %r8, %r9
mov %r10, -8(%rdi)
.byte 0xf3,76,0x0f,0x38,0xf6,15	# adox (%rdi), %r9
mov %r9, (%rdi)
.byte 0xf3,72,0x0f,0x38,0xf6,193	# adox %rcx, %rax (rcx = 0)
Lc2:
.byte 0xc4,98,171,0xf6,70,8	# mulx 8(%rsi), %r10, %r8
adc %rax, %r14
add %r10, %r14
mov (%r11), %rdx		# next vp limb
test %ecx, %ecx			# clear OF and CF for the adx chains
.byte 0xc4,98,171,0xf6,70,240	# mulx -16(%rsi), %r10, %r8
.byte 0xc4,226,179,0xf6,70,248	# mulx -8(%rsi), %r9, %rax
.byte 0xf3,76,0x0f,0x38,0xf6,87,248	# adox -8(%rdi), %r10
.byte 0x66,77,0x0f,0x38,0xf6,200	# adcx %r8, %r9
mov %r10, -8(%rdi)
.byte 0xf3,76,0x0f,0x38,0xf6,15	# adox (%rdi), %r9
.byte 0xf3,72,0x0f,0x38,0xf6,193	# adox %rcx, %rax (rcx = 0)
adc %rcx, %rax
.byte 0xc4,98,171,0xf6,6	# mulx (%rsi), %r10, %r8
add %rax, %r14
add %r10, %r14
mov 8(%r11), %rdx		# final vp limb entering the mulx chains
.byte 0xc4,226,243,0xf6,70,240	# mulx -16(%rsi), %rcx, %rax
add %r9, %rcx
mov %rcx, (%rdi)
adc $0, %rax
.byte 0xc4,98,171,0xf6,70,248	# mulx -8(%rsi), %r10, %r8
add %rax, %r14
add %r10, %r14
mov %r14, 8(%rdi)		# top limb, including the up[0]*vp[n-1] term
pop %rbp
pop %rbx
pop %r12
pop %r14
ret
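# Jump table: 32-bit offsets of the Lmf* entry points relative to
# Lmtab (position-independent form), indexed by n mod 8.  The entries
# are rotated by one (index k holds Lmf((k+7) mod 8)), so the dispatch
# is effectively on (n-1) mod 8.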
.text
.align 3, 0x90
Lmtab:.set Lmf7_tmp, Lmf7-Lmtab
.long Lmf7_tmp
.set Lmf0_tmp, Lmf0-Lmtab
.long Lmf0_tmp
.set Lmf1_tmp, Lmf1-Lmtab
.long Lmf1_tmp
.set Lmf2_tmp, Lmf2-Lmtab
.long Lmf2_tmp
.set Lmf3_tmp, Lmf3-Lmtab
.long Lmf3_tmp
.set Lmf4_tmp, Lmf4-Lmtab
.long Lmf4_tmp
.set Lmf5_tmp, Lmf5-Lmtab
.long Lmf5_tmp
.set Lmf6_tmp, Lmf6-Lmtab
.long Lmf6_tmp