diff options
author | polwex <polwex@sortug.com> | 2025-10-05 21:56:51 +0700 |
---|---|---|
committer | polwex <polwex@sortug.com> | 2025-10-05 21:56:51 +0700 |
commit | fcedfddf00b3f994e4f4e40332ac7fc192c63244 (patch) | |
tree | 51d38e62c7bdfcc5f9a5e9435fe820c93cfc9a3d /vere/ext/gmp/gen/x86_64-windows/mpn/rsh1sub_n.s |
claude is gud
Diffstat (limited to 'vere/ext/gmp/gen/x86_64-windows/mpn/rsh1sub_n.s')
-rw-r--r-- | vere/ext/gmp/gen/x86_64-windows/mpn/rsh1sub_n.s | 222 |
1 files changed, 222 insertions, 0 deletions
# x86-64 Windows GAS (AT&T syntax) -- machine-generated GMP mpn routine.
# Microsoft x64 ABI.  Leaf functions (no calls out), so the absence of
# .seh_* unwind directives is acceptable.
#
#   mp_limb_t __gmpn_rsh1sub_n (mp_limb_t *rp, const mp_limb_t *up,
#                               const mp_limb_t *vp, mp_size_t n)
#   mp_limb_t __gmpn_rsh1sub_nc(rp, up, vp, n, mp_limb_t cy)   # borrow-in form
#
# Computes {rp,n} = ({up,n} - {vp,n} [- cy]) >> 1 and returns the bit that
# is shifted out (the low bit of the full difference) in %rax.
# NOTE(review): contract inferred from the code plus GMP's mpn_rsh1sub_n
# naming convention -- confirm against GMP internals documentation.
#
# Technique: the limb-wise subtract (sbb chain, needs CF = borrow) is
# interleaved with the right-shift-by-1 (rcr chain, needs CF = bit from the
# next-lower limb).  Each batch parks its outgoing borrow in the top bit of
# the last rotated limb (kept in %rbx); "add %rbx, %rbx" at the head of the
# next batch doubles that limb, moving the parked borrow back into CF for
# the next sbb chain.  Pointer bumps use lea precisely because lea does not
# disturb CF.

	.text
	.align	16, 0x90
	.globl	__gmpn_rsh1sub_nc

	.def	__gmpn_rsh1sub_nc
	.scl	2
	.type	32
	.endef
__gmpn_rsh1sub_nc:
	# MS x64 prologue: rdi/rsi are callee-saved here; translate the
	# Win64 argument registers (rcx,rdx,r8,r9) into the SysV positions
	# (rdi,rsi,rdx,rcx) that the shared body below was written for.
	push	%rdi
	push	%rsi
	mov	%rcx, %rdi		# rdi = rp
	mov	%rdx, %rsi		# rsi = up
	mov	%r8, %rdx		# rdx = vp
	mov	%r9, %rcx		# rcx = n

	# 5th arg (cy) lives on the stack: 32B shadow space + return
	# address + the two pushes above = 56(%rsp).
	mov	56(%rsp), %r8
	push	%rbx			# rbx is callee-saved; used as limb carrier

	xor	%eax, %eax		# eax = 0 (return-bit accumulator)
	neg	%r8			# CF = (cy != 0): borrow-in as a flag
	mov	(%rsi), %rbx
	sbb	(%rdx), %rbx		# rbx = up[0] - vp[0] - cy, CF = borrow
	jmp	Lent			# join common tail with CF/%rbx/%eax live


	.align	16, 0x90
	.globl	__gmpn_rsh1sub_n

	.def	__gmpn_rsh1sub_n
	.scl	2
	.type	32
	.endef
__gmpn_rsh1sub_n:
	# Same Win64 -> SysV register shuffle as the _nc entry above.
	push	%rdi
	push	%rsi
	mov	%rcx, %rdi		# rdi = rp
	mov	%rdx, %rsi		# rsi = up
	mov	%r8, %rdx		# rdx = vp
	mov	%r9, %rcx		# rcx = n

	push	%rbx

	xor	%eax, %eax		# eax = 0
	mov	(%rsi), %rbx
	sub	(%rdx), %rbx		# rbx = up[0] - vp[0], CF = borrow
Lent:
	rcr	%rbx			# top bit <- borrow, CF <- low bit of diff
	adc	%eax, %eax		# eax = bit shifted out (return value)

	mov	%ecx, %r11d
	and	$3, %r11d		# r11d = n mod 4: select remainder handler

	cmp	$1, %r11d
	je	Ldo			# n = 4q+1: nothing extra to peel

Ln1:	cmp	$2, %r11d
	jne	Ln2
	# n = 4q+2: peel one limb.  add restores parked borrow into CF.
	add	%rbx, %rbx
	mov	8(%rsi), %r10
	sbb	8(%rdx), %r10
	lea	8(%rsi), %rsi
	lea	8(%rdx), %rdx
	lea	8(%rdi), %rdi
	rcr	%r10			# shift batch right through carry...
	rcr	%rbx			# ...completing limb 0
	mov	%rbx, -8(%rdi)
	jmp	Lcj1

Ln2:	cmp	$3, %r11d
	jne	Ln3
	# n = 4q+3: peel two limbs.
	add	%rbx, %rbx
	mov	8(%rsi), %r9
	mov	16(%rsi), %r10
	sbb	8(%rdx), %r9
	sbb	16(%rdx), %r10
	lea	16(%rsi), %rsi
	lea	16(%rdx), %rdx
	lea	16(%rdi), %rdi
	rcr	%r10
	rcr	%r9
	rcr	%rbx
	mov	%rbx, -16(%rdi)
	jmp	Lcj2

Ln3:	# n = 4q: peel three limbs (dec rcx so shr $2 below yields q-1).
	dec	%rcx
	add	%rbx, %rbx
	mov	8(%rsi), %r8
	mov	16(%rsi), %r9
	sbb	8(%rdx), %r8
	sbb	16(%rdx), %r9
	mov	24(%rsi), %r10
	sbb	24(%rdx), %r10
	lea	24(%rsi), %rsi
	lea	24(%rdx), %rdx
	lea	24(%rdi), %rdi
	rcr	%r10
	rcr	%r9
	rcr	%r8
	rcr	%rbx
	mov	%rbx, -24(%rdi)
	mov	%r8, -16(%rdi)
Lcj2:	mov	%r9, -8(%rdi)
Lcj1:	mov	%r10, %rbx		# carry the half-shifted top limb forward

Ldo:
	shr	$2, %rcx		# rcx = remaining limbs / 4
	je	Lend
	.align	16, 0x90
Ltop:	# Main 4-limbs-per-iteration loop.  On entry: rbx = previous top
	# limb, half shifted, with the outgoing borrow parked in its MSB.
	add	%rbx, %rbx		# CF <- parked borrow; rbx <- diff limb, bit0 lost

	mov	8(%rsi), %r8
	mov	16(%rsi), %r9
	sbb	8(%rdx), %r8
	sbb	16(%rdx), %r9
	mov	24(%rsi), %r10
	mov	32(%rsi), %r11
	sbb	24(%rdx), %r10
	sbb	32(%rdx), %r11		# CF = borrow out of this batch

	lea	32(%rsi), %rsi		# lea: bump pointers without touching CF
	lea	32(%rdx), %rdx

	rcr	%r11			# park borrow in MSB of r11
	rcr	%r10			# ripple low bits downward...
	rcr	%r9
	rcr	%r8

	rcr	%rbx			# rbx now fully shifted: store it
	mov	%rbx, (%rdi)
	mov	%r8, 8(%rdi)
	mov	%r9, 16(%rdi)
	mov	%r10, 24(%rdi)
	mov	%r11, %rbx		# r11 becomes next iteration's carrier

	lea	32(%rdi), %rdi
	dec	%rcx
	jne	Ltop

Lend:	mov	%rbx, (%rdi)		# final (most significant) result limb
	pop	%rbx			# epilogue: restore callee-saved regs
	pop	%rsi
	pop	%rdi
	ret