path: root/vere/ext/gmp/gen/x86_64-macos/mpn/sub_err2_n.s
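# ___gmpn_sub_err2_n -- mpn subtraction with two error terms, for x86_64
# Darwin (hence the extra leading underscore on the symbol).  This is
# generated GMP assembly; the generator strips the upstream comments, so
# the notes below are reconstructed from the code and are best-effort.
#
# Assumed C prototype (as declared in GMP's internal gmp-impl.h):
#
#   mp_limb_t mpn_sub_err2_n (mp_ptr rp, mp_srcptr up, mp_srcptr vp,
#                             mp_ptr ep, mp_srcptr yp1, mp_srcptr yp2,
#                             mp_size_t n, mp_limb_t cy);
#
# Computes {rp,n} = {up,n} - {vp,n} - cy (cy is 0 or 1) and returns the
# borrow out of the top limb.  For every limb position i that produces a
# borrow, yp1[n-1-i] is added into a double-limb accumulator stored to
# ep[0..1], and yp2[n-1-i] into a second accumulator stored to ep[2..3].
#
# Register usage (System V AMD64 ABI):
#   rdi = rp   rsi = up   rdx = vp   rcx = ep   r8 = yp1   r9 = yp2
#   r10 = n  (7th argument, on the stack)
#   rax = cy (8th argument, on the stack); also holds the running borrow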
	.text
	.align	4, 0x90
	.globl	___gmpn_sub_err2_n
___gmpn_sub_err2_n:

	mov	16(%rsp), %rax		# cy (8th argument, on the stack)
	mov	8(%rsp), %r10		# n (7th argument, on the stack)

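# Save the callee-saved registers this routine clobbers.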
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14

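# Zero the two double-limb error accumulators: r11:rbp collects the yp1
# term (ep[0..1]), r13:r12 the yp2 term (ep[2..3]).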
	xor	%ebp, %ebp
	xor	%r11d, %r11d
	xor	%r12d, %r12d
	xor	%r13d, %r13d

	sub	%r8, %r9		# r9 = yp2 - yp1; yp2 is addressed as (%r8,%r9)

	lea	(%rdi,%r10,8), %rdi	# point rp, up, vp past their last limb;
	lea	(%rsi,%r10,8), %rsi	#  the loop indexes them with a negative
	lea	(%rdx,%r10,8), %rdx	#  offset that counts up toward zero

	test	$1, %r10		# odd limb count?
	jnz	Lodd

	lea	-8(%r8,%r10,8), %r8	# n even: r8 = &yp1[n-1]
	neg	%r10			# index runs from -n up to 0
	jmp	Ltop

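# n odd: peel off one limb so the main loop can process two per pass.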
	.align	4, 0x90
Lodd:
	lea	-16(%r8,%r10,8), %r8	# r8 = &yp1[n-2]
	neg	%r10			# index runs from -n up to 0
	shr	$1, %rax		# move the incoming borrow into CF
	mov	(%rsi,%r10,8), %rbx
	sbb	(%rdx,%r10,8), %rbx	# rp[0] = up[0] - vp[0] - cy
	cmovc	8(%r8), %rbp		# on borrow, e1 = yp1[n-1]
	cmovc	8(%r8,%r9), %r12	# on borrow, e2 = yp2[n-1]
	mov	%rbx, (%rdi,%r10,8)
	sbb	%rax, %rax		# park the borrow as a 0/-1 mask
	inc	%r10
	jz	Lend			# n == 1: nothing left for the loop

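# Main loop: two limbs per iteration.  The running borrow is parked in
# rax between iterations (sbb %rax,%rax makes a 0/-1 mask; shr $1 moves
# its low bit back into CF), and each limb's 0/-1 borrow mask selects
# whether the matching reversed yp1/yp2 limb joins the error sums.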
	.align	4, 0x90
Ltop:
	mov	(%rsi,%r10,8), %rbx
	shr	$1, %rax		# restore the parked borrow to CF
	sbb	(%rdx,%r10,8), %rbx
	mov	%rbx, (%rdi,%r10,8)	# first limb of the pair
	sbb	%r14, %r14		# its borrow as a 0/-1 mask in r14

	mov	8(%rsi,%r10,8), %rbx
	sbb	8(%rdx,%r10,8), %rbx
	mov	%rbx, 8(%rdi,%r10,8)	# second limb of the pair
	sbb	%rax, %rax		# its borrow as a 0/-1 mask in rax

	mov	(%r8), %rbx		# e1 += yp1 limb for the first subtract,
	and	%r14, %rbx		#  masked by that limb's borrow
	add	%rbx, %rbp
	adc	$0, %r11

	and	(%r8,%r9), %r14		# same yp2 limb, masked by the same borrow
	add	%r14, %r12		# e2 += masked limb
	adc	$0, %r13

	mov	-8(%r8), %rbx		# e1 += yp1 limb for the second subtract,
	and	%rax, %rbx		#  masked by its borrow
	add	%rbx, %rbp
	adc	$0, %r11

	mov	-8(%r8,%r9), %rbx	# e2 += corresponding yp2 limb
	and	%rax, %rbx
	add	%rbx, %r12
	adc	$0, %r13

	add	$2, %r10		# two limbs consumed
	lea	-16(%r8), %r8		# yp pointers step downward
	jnz	Ltop
Lend:

	mov	%rbp, (%rcx)		# ep[0] = e1 low
	mov	%r11, 8(%rcx)		# ep[1] = e1 high
	mov	%r12, 16(%rcx)		# ep[2] = e2 low
	mov	%r13, 24(%rcx)		# ep[3] = e2 high

	and	$1, %eax		# return value: final borrow (mask is 0 or -1)

	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	ret
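
# A minimal calling sketch in C (illustrative only: mpn_sub_err2_n is a
# GMP-internal function declared in gmp-impl.h, not the public gmp.h, and
# all values below are made up for the example):
#
#   mp_limb_t up[2]  = {5, 0},   vp[2]  = {7, 0};
#   mp_limb_t yp1[2] = {10, 20}, yp2[2] = {30, 40};
#   mp_limb_t rp[2], ep[4];
#   mp_limb_t b = mpn_sub_err2_n (rp, up, vp, ep, yp1, yp2, 2, 0);
#   /* Limb 0 borrows (5 < 7) and limb 1 borrows (0 - 0 - 1), so
#      ep[0..1] = yp1[1] + yp1[0] = 30, ep[2..3] = yp2[1] + yp2[0] = 70,
#      and b == 1 (borrow out of the top limb).  */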