author    | polwex <polwex@sortug.com> | 2025-10-05 21:56:51 +0700
committer | polwex <polwex@sortug.com> | 2025-10-05 21:56:51 +0700
commit    | fcedfddf00b3f994e4f4e40332ac7fc192c63244 (patch)
tree      | 51d38e62c7bdfcc5f9a5e9435fe820c93cfc9a3d /vere/ext/gmp/gen/x86_64-macos/mpn/cnd_add_n.s
claude is gud
Diffstat (limited to 'vere/ext/gmp/gen/x86_64-macos/mpn/cnd_add_n.s')
-rw-r--r-- | vere/ext/gmp/gen/x86_64-macos/mpn/cnd_add_n.s | 184 |
1 file changed, 184 insertions, 0 deletions
diff --git a/vere/ext/gmp/gen/x86_64-macos/mpn/cnd_add_n.s b/vere/ext/gmp/gen/x86_64-macos/mpn/cnd_add_n.s
new file mode 100644
index 0000000..274a59f
--- /dev/null
+++ b/vere/ext/gmp/gen/x86_64-macos/mpn/cnd_add_n.s
@@ -0,0 +1,184 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+	.text
+	.align	4, 0x90
+	.globl	___gmpn_cnd_add_n
+
+
+___gmpn_cnd_add_n:
+
+
+
+	push	%rbx
+
+	neg	%rdi
+	sbb	%rbx, %rbx
+
+	test	$1, %r8b
+	jz	Lx0
+Lx1:	test	$2, %r8b
+	jz	Lb1
+
+Lb3:	mov	(%rcx), %rdi
+	mov	8(%rcx), %r9
+	mov	16(%rcx), %r10
+	and	%rbx, %rdi
+	and	%rbx, %r9
+	and	%rbx, %r10
+	add	(%rdx), %rdi
+	mov	%rdi, (%rsi)
+	adc	8(%rdx), %r9
+	mov	%r9, 8(%rsi)
+	adc	16(%rdx), %r10
+	mov	%r10, 16(%rsi)
+	sbb	%eax, %eax
+	lea	24(%rdx), %rdx
+	lea	24(%rcx), %rcx
+	lea	24(%rsi), %rsi
+	sub	$3, %r8
+	jnz	Ltop
+	jmp	Lend
+
+Lx0:	xor	%eax, %eax
+	test	$2, %r8b
+	jz	Ltop
+
+Lb2:	mov	(%rcx), %rdi
+	mov	8(%rcx), %r9
+	and	%rbx, %rdi
+	and	%rbx, %r9
+	add	(%rdx), %rdi
+	mov	%rdi, (%rsi)
+	adc	8(%rdx), %r9
+	mov	%r9, 8(%rsi)
+	sbb	%eax, %eax
+	lea	16(%rdx), %rdx
+	lea	16(%rcx), %rcx
+	lea	16(%rsi), %rsi
+	sub	$2, %r8
+	jnz	Ltop
+	jmp	Lend
+
+Lb1:	mov	(%rcx), %rdi
+	and	%rbx, %rdi
+	add	(%rdx), %rdi
+	mov	%rdi, (%rsi)
+	sbb	%eax, %eax
+	lea	8(%rdx), %rdx
+	lea	8(%rcx), %rcx
+	lea	8(%rsi), %rsi
+	dec	%r8
+	jz	Lend
+
+	.align	4, 0x90
+Ltop:	mov	(%rcx), %rdi
+	mov	8(%rcx), %r9
+	mov	16(%rcx), %r10
+	mov	24(%rcx), %r11
+	lea	32(%rcx), %rcx
+	and	%rbx, %rdi
+	and	%rbx, %r9
+	and	%rbx, %r10
+	and	%rbx, %r11
+	add	%eax, %eax
+	adc	(%rdx), %rdi
+	mov	%rdi, (%rsi)
+	adc	8(%rdx), %r9
+	mov	%r9, 8(%rsi)
+	adc	16(%rdx), %r10
+	mov	%r10, 16(%rsi)
+	adc	24(%rdx), %r11
+	lea	32(%rdx), %rdx
+	mov	%r11, 24(%rsi)
+	lea	32(%rsi), %rsi
+	sbb	%eax, %eax
+	sub	$4, %r8
+	jnz	Ltop
+
+Lend:	neg	%eax
+	pop	%rbx
+
+	ret
+
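The added file is the machine-generated x86-64 (macOS/Mach-O) build of GMP's mpn_cnd_add_n: if cnd is non-zero it computes {rp, n} = {up, n} + {vp, n} and returns the carry out; if cnd is zero it copies {up, n} to {rp, n} and returns 0, executing the same instruction sequence either way so the condition does not leak through timing. In the x86-64 SysV calling convention the arguments arrive as cnd in %rdi, rp in %rsi, up in %rdx, vp in %rcx and n in %r8; the neg %rdi / sbb %rbx, %rbx pair turns cnd into an all-ones or all-zeros mask that is ANDed into each vp limb before the carried add. A minimal C model of that documented behaviour (an illustrative sketch, not code from this commit; cnd_add_n_model is a hypothetical name) is:

/* Sketch of mpn_cnd_add_n's documented semantics.  The mask mirrors the
 * neg/sbb trick in the assembly so both the cnd != 0 and cnd == 0 cases
 * run the same instructions. */
#include <gmp.h>

static mp_limb_t
cnd_add_n_model (mp_limb_t cnd, mp_limb_t *rp,
                 const mp_limb_t *up, const mp_limb_t *vp, mp_size_t n)
{
  mp_limb_t mask = -(mp_limb_t) (cnd != 0);   /* all ones or all zeros */
  mp_limb_t cy = 0;                           /* running carry, 0 or 1 */

  for (mp_size_t i = 0; i < n; i++)
    {
      mp_limb_t v = vp[i] & mask;             /* vp limb, or 0 when cnd == 0 */
      mp_limb_t s = up[i] + cy;
      cy = (s < cy);                          /* carry from adding cy */
      s += v;
      cy += (s < v);                          /* carry from adding v */
      rp[i] = s;
    }
  return cy;
}

In the assembly itself, the Lb1/Lb2/Lb3 entry blocks peel off n mod 4 limbs so that the Ltop loop can process four limbs per iteration; sbb %eax, %eax saves the carry as 0 or -1 between iterations and add %eax, %eax restores it to the carry flag at the top of the loop, with neg %eax at Lend converting it back to the 0/1 return value.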