| author    | polwex <polwex@sortug.com> | 2025-10-05 21:56:51 +0700 |
|-----------|----------------------------|---------------------------|
| committer | polwex <polwex@sortug.com> | 2025-10-05 21:56:51 +0700 |
| commit    | fcedfddf00b3f994e4f4e40332ac7fc192c63244 (patch) | |
| tree      | 51d38e62c7bdfcc5f9a5e9435fe820c93cfc9a3d /vere/ext/gmp/gen/x86_64-linux/mpn/add_n.s | |
claude is gud
Diffstat (limited to 'vere/ext/gmp/gen/x86_64-linux/mpn/add_n.s')
-rw-r--r-- | vere/ext/gmp/gen/x86_64-linux/mpn/add_n.s | 194
1 file changed, 194 insertions, 0 deletions
diff --git a/vere/ext/gmp/gen/x86_64-linux/mpn/add_n.s b/vere/ext/gmp/gen/x86_64-linux/mpn/add_n.s
new file mode 100644
index 0000000..14cc32b
--- /dev/null
+++ b/vere/ext/gmp/gen/x86_64-linux/mpn/add_n.s
@@ -0,0 +1,194 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+        .text
+        .align 16, 0x90
+        .globl __gmpn_add_nc
+        .type __gmpn_add_nc,@function
+
+__gmpn_add_nc:
+
+
+
+        mov %ecx, %eax
+        shr $2, %rcx
+        and $3, %eax
+        bt $0, %r8
+        jrcxz .Llt4
+
+        mov (%rsi), %r8
+        mov 8(%rsi), %r9
+        dec %rcx
+        jmp .Lmid
+
+        .size __gmpn_add_nc,.-__gmpn_add_nc
+        .align 16, 0x90
+        .globl __gmpn_add_n
+        .type __gmpn_add_n,@function
+
+__gmpn_add_n:
+
+
+        mov %ecx, %eax
+        shr $2, %rcx
+        and $3, %eax
+        jrcxz .Llt4
+
+        mov (%rsi), %r8
+        mov 8(%rsi), %r9
+        dec %rcx
+        jmp .Lmid
+
+.Llt4:  dec %eax
+        mov (%rsi), %r8
+        jnz .L2
+        adc (%rdx), %r8
+        mov %r8, (%rdi)
+        adc %eax, %eax
+
+        ret
+
+.L2:    dec %eax
+        mov 8(%rsi), %r9
+        jnz .L3
+        adc (%rdx), %r8
+        adc 8(%rdx), %r9
+        mov %r8, (%rdi)
+        mov %r9, 8(%rdi)
+        adc %eax, %eax
+
+        ret
+
+.L3:    mov 16(%rsi), %r10
+        adc (%rdx), %r8
+        adc 8(%rdx), %r9
+        adc 16(%rdx), %r10
+        mov %r8, (%rdi)
+        mov %r9, 8(%rdi)
+        mov %r10, 16(%rdi)
+        setc %al
+
+        ret
+
+        .align 16, 0x90
+.Ltop:  adc (%rdx), %r8
+        adc 8(%rdx), %r9
+        adc 16(%rdx), %r10
+        adc 24(%rdx), %r11
+        mov %r8, (%rdi)
+        lea 32(%rsi), %rsi
+        mov %r9, 8(%rdi)
+        mov %r10, 16(%rdi)
+        dec %rcx
+        mov %r11, 24(%rdi)
+        lea 32(%rdx), %rdx
+        mov (%rsi), %r8
+        mov 8(%rsi), %r9
+        lea 32(%rdi), %rdi
+.Lmid:  mov 16(%rsi), %r10
+        mov 24(%rsi), %r11
+        jnz .Ltop
+
+.Lend:  lea 32(%rsi), %rsi
+        adc (%rdx), %r8
+        adc 8(%rdx), %r9
+        adc 16(%rdx), %r10
+        adc 24(%rdx), %r11
+        lea 32(%rdx), %rdx
+        mov %r8, (%rdi)
+        mov %r9, 8(%rdi)
+        mov %r10, 16(%rdi)
+        mov %r11, 24(%rdi)
+        lea 32(%rdi), %rdi
+
+        inc %eax
+        dec %eax
+        jnz .Llt4
+        adc %eax, %eax
+
+        ret
+        .size __gmpn_add_n,.-__gmpn_add_n
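For readers cross-checking the assembly: under the System V x86-64 calling convention used here, `%rdi` holds the result pointer, `%rsi` and `%rdx` the two source limb pointers, `%rcx` the limb count, and `%r8` the carry-in for `__gmpn_add_nc`; the carry-out is returned in `%eax`. The C below is a minimal reference sketch of that `mpn_add_n`/`mpn_add_nc` behaviour, assuming 64-bit limbs; `limb_t`, `ref_add_n`, and `ref_add_nc` are illustrative names, not code from this repository or from GMP.

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <inttypes.h>

typedef uint64_t limb_t;  /* one 64-bit limb, matching the x86_64 build */

/* Reference model: rp[0..n-1] = up[] + vp[] + cy, returning the final carry
   (0 or 1).  The routine in the diff above computes the same thing with a
   4-way unrolled adc chain. */
static limb_t ref_add_nc(limb_t *rp, const limb_t *up, const limb_t *vp,
                         size_t n, limb_t cy)
{
    for (size_t i = 0; i < n; i++) {
        limb_t u  = up[i];
        limb_t s  = u + vp[i];
        limb_t c1 = (limb_t)(s < u);   /* carry out of up[i] + vp[i] */
        limb_t r  = s + cy;
        limb_t c2 = (limb_t)(r < s);   /* carry out of adding the incoming carry */
        rp[i] = r;
        cy = c1 | c2;                  /* at most one of c1, c2 is ever set */
    }
    return cy;
}

static limb_t ref_add_n(limb_t *rp, const limb_t *up, const limb_t *vp, size_t n)
{
    return ref_add_nc(rp, up, vp, n, 0);
}

int main(void)
{
    limb_t a[2] = { UINT64_MAX, UINT64_MAX };  /* 2^128 - 1 */
    limb_t b[2] = { 1, 0 };
    limb_t r[2];
    limb_t cy = ref_add_n(r, a, b, 2);
    /* Expect r = {0, 0} and cy = 1: the addition wraps and carries out. */
    printf("r0=%" PRIu64 " r1=%" PRIu64 " cy=%" PRIu64 "\n", r[0], r[1], cy);
    return 0;
}
```

The `shr $2` / `and $3` pair at each entry point splits the count into `n/4` passes through the unrolled `.Ltop` loop plus an `n mod 4` remainder handled at `.Llt4`/`.L2`/`.L3`, and the `inc %eax` / `dec %eax` pair before `jnz .Llt4` tests that remainder without disturbing the carry chain, since `inc` and `dec` leave CF untouched.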