path: root/security/nss/lib/freebl/mpi/mpi_amd64_sun.s
author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d  /security/nss/lib/freebl/mpi/mpi_amd64_sun.s
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'security/nss/lib/freebl/mpi/mpi_amd64_sun.s')
-rw-r--r--  security/nss/lib/freebl/mpi/mpi_amd64_sun.s | 385
1 file changed, 385 insertions(+), 0 deletions(-)
diff --git a/security/nss/lib/freebl/mpi/mpi_amd64_sun.s b/security/nss/lib/freebl/mpi/mpi_amd64_sun.s
new file mode 100644
index 000000000..ddd5c40fd
--- /dev/null
+++ b/security/nss/lib/freebl/mpi/mpi_amd64_sun.s
@@ -0,0 +1,385 @@
+/ This Source Code Form is subject to the terms of the Mozilla Public
+/ License, v. 2.0. If a copy of the MPL was not distributed with this
+/ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/ ------------------------------------------------------------------------
+/
+/ Implementation of s_mpv_mul_set_vec which exploits
+/ the 64X64->128 bit unsigned multiply instruction.
+/
+/ ------------------------------------------------------------------------
+
+/ r = a * digit, r and a are vectors of length len
+/ returns the carry digit
+/ r and a are 64 bit aligned.
+/
+/ uint64_t
+/ s_mpv_mul_set_vec64(uint64_t *r, uint64_t *a, int len, uint64_t digit)
+/
+
+.text; .align 16; .globl s_mpv_mul_set_vec64; .type s_mpv_mul_set_vec64, @function; s_mpv_mul_set_vec64:
+
+ xorq %rax, %rax / if (len == 0) return (0)
+ testq %rdx, %rdx
+ jz .L17
+
+ movq %rdx, %r8 / Use r8 for len; %rdx is used by mul
+ xorq %r9, %r9 / cy = 0
+
+.L15:
+	cmpq	$8, %r8			/ len < 8 ?
+ jb .L16
+ movq 0(%rsi), %rax / rax = a[0]
+ movq 8(%rsi), %r11 / prefetch a[1]
+ mulq %rcx / p = a[0] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 0(%rdi) / r[0] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 16(%rsi), %r11 / prefetch a[2]
+ mulq %rcx / p = a[1] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 8(%rdi) / r[1] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 24(%rsi), %r11 / prefetch a[3]
+ mulq %rcx / p = a[2] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 16(%rdi) / r[2] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 32(%rsi), %r11 / prefetch a[4]
+ mulq %rcx / p = a[3] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 24(%rdi) / r[3] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 40(%rsi), %r11 / prefetch a[5]
+ mulq %rcx / p = a[4] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 32(%rdi) / r[4] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 48(%rsi), %r11 / prefetch a[6]
+ mulq %rcx / p = a[5] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 40(%rdi) / r[5] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 56(%rsi), %r11 / prefetch a[7]
+ mulq %rcx / p = a[6] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 48(%rdi) / r[6] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ mulq %rcx / p = a[7] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 56(%rdi) / r[7] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ addq $64, %rsi
+ addq $64, %rdi
+ subq $8, %r8
+
+ jz .L17
+ jmp .L15
+
+.L16:
+ movq 0(%rsi), %rax
+ mulq %rcx / p = a[0] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 0(%rdi) / r[0] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L17
+
+ movq 8(%rsi), %rax
+ mulq %rcx / p = a[1] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 8(%rdi) / r[1] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L17
+
+ movq 16(%rsi), %rax
+ mulq %rcx / p = a[2] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 16(%rdi) / r[2] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L17
+
+ movq 24(%rsi), %rax
+ mulq %rcx / p = a[3] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 24(%rdi) / r[3] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L17
+
+ movq 32(%rsi), %rax
+ mulq %rcx / p = a[4] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 32(%rdi) / r[4] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L17
+
+ movq 40(%rsi), %rax
+ mulq %rcx / p = a[5] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 40(%rdi) / r[5] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L17
+
+ movq 48(%rsi), %rax
+ mulq %rcx / p = a[6] * digit
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 48(%rdi) / r[6] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L17
+
+
+.L17:
+ movq %r9, %rax
+ ret
+
+.size s_mpv_mul_set_vec64, .-s_mpv_mul_set_vec64
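For reference, the routine above reduces to the short C model below. This is
an illustrative sketch, not part of the patch: the name mul_set_vec64_ref is
invented, and unsigned __int128 (a GCC/Clang extension) stands in for the
64x64->128-bit mulq.

    #include <stdint.h>

    /* r[i] = lo64(a[i] * digit + cy), carrying hi64 into the next digit.
     * Returns the final carry, like s_mpv_mul_set_vec64 above. */
    static uint64_t
    mul_set_vec64_ref(uint64_t *r, const uint64_t *a, int len, uint64_t digit)
    {
        uint64_t cy = 0;                    /* %r9 in the assembly */
        for (int i = 0; i < len; i++) {
            unsigned __int128 p =
                (unsigned __int128)a[i] * digit + cy;  /* mulq; addq; adcq $0 */
            r[i] = (uint64_t)p;             /* r[i] = lo(p) */
            cy = (uint64_t)(p >> 64);       /* cy = hi(p)   */
        }
        return cy;                          /* movq %r9, %rax; ret */
    }

The 8-way unrolled .L15 loop and the .L16 tail both implement exactly this
recurrence; the unrolling merely hides the mulq latency by pre-loading the
next a[i] into %r11 (the "prefetch" comments) while the multiply is in flight.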
+
+/ ------------------------------------------------------------------------
+/
+/ Implementation of s_mpv_mul_add_vec which exploits
+/ the 64X64->128 bit unsigned multiply instruction.
+/
+/ ------------------------------------------------------------------------
+
+/ r += a * digit, r and a are vectors of length len
+/ returns the carry digit
+/ r and a are 64 bit aligned.
+/
+/ uint64_t
+/ s_mpv_mul_add_vec64(uint64_t *r, uint64_t *a, int len, uint64_t digit)
+/
+
+.text; .align 16; .globl s_mpv_mul_add_vec64; .type s_mpv_mul_add_vec64, @function; s_mpv_mul_add_vec64:
+
+ xorq %rax, %rax / if (len == 0) return (0)
+ testq %rdx, %rdx
+ jz .L27
+
+ movq %rdx, %r8 / Use r8 for len; %rdx is used by mul
+ xorq %r9, %r9 / cy = 0
+
+.L25:
+	cmpq	$8, %r8			/ len < 8 ?
+ jb .L26
+ movq 0(%rsi), %rax / rax = a[0]
+ movq 0(%rdi), %r10 / r10 = r[0]
+ movq 8(%rsi), %r11 / prefetch a[1]
+ mulq %rcx / p = a[0] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[0]
+ movq 8(%rdi), %r10 / prefetch r[1]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 0(%rdi) / r[0] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 16(%rsi), %r11 / prefetch a[2]
+ mulq %rcx / p = a[1] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[1]
+ movq 16(%rdi), %r10 / prefetch r[2]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 8(%rdi) / r[1] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 24(%rsi), %r11 / prefetch a[3]
+ mulq %rcx / p = a[2] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[2]
+ movq 24(%rdi), %r10 / prefetch r[3]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 16(%rdi) / r[2] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 32(%rsi), %r11 / prefetch a[4]
+ mulq %rcx / p = a[3] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[3]
+ movq 32(%rdi), %r10 / prefetch r[4]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 24(%rdi) / r[3] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 40(%rsi), %r11 / prefetch a[5]
+ mulq %rcx / p = a[4] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[4]
+ movq 40(%rdi), %r10 / prefetch r[5]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 32(%rdi) / r[4] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 48(%rsi), %r11 / prefetch a[6]
+ mulq %rcx / p = a[5] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[5]
+ movq 48(%rdi), %r10 / prefetch r[6]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 40(%rdi) / r[5] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ movq 56(%rsi), %r11 / prefetch a[7]
+ mulq %rcx / p = a[6] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[6]
+ movq 56(%rdi), %r10 / prefetch r[7]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 48(%rdi) / r[6] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ movq %r11, %rax
+ mulq %rcx / p = a[7] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[7]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 56(%rdi) / r[7] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+
+ addq $64, %rsi
+ addq $64, %rdi
+ subq $8, %r8
+
+ jz .L27
+ jmp .L25
+
+.L26:
+ movq 0(%rsi), %rax
+ movq 0(%rdi), %r10
+ mulq %rcx / p = a[0] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[0]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 0(%rdi) / r[0] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L27
+
+ movq 8(%rsi), %rax
+ movq 8(%rdi), %r10
+ mulq %rcx / p = a[1] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[1]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 8(%rdi) / r[1] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L27
+
+ movq 16(%rsi), %rax
+ movq 16(%rdi), %r10
+ mulq %rcx / p = a[2] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[2]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 16(%rdi) / r[2] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L27
+
+ movq 24(%rsi), %rax
+ movq 24(%rdi), %r10
+ mulq %rcx / p = a[3] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[3]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 24(%rdi) / r[3] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L27
+
+ movq 32(%rsi), %rax
+ movq 32(%rdi), %r10
+ mulq %rcx / p = a[4] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[4]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 32(%rdi) / r[4] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L27
+
+ movq 40(%rsi), %rax
+ movq 40(%rdi), %r10
+ mulq %rcx / p = a[5] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[5]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 40(%rdi) / r[5] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L27
+
+ movq 48(%rsi), %rax
+ movq 48(%rdi), %r10
+ mulq %rcx / p = a[6] * digit
+ addq %r10, %rax
+ adcq $0, %rdx / p += r[6]
+ addq %r9, %rax
+ adcq $0, %rdx / p += cy
+ movq %rax, 48(%rdi) / r[6] = lo(p)
+ movq %rdx, %r9 / cy = hi(p)
+ decq %r8
+ jz .L27
+
+
+.L27:
+ movq %r9, %rax
+ ret
+
+.size s_mpv_mul_add_vec64, .-s_mpv_mul_add_vec64
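A matching C model for s_mpv_mul_add_vec64 (again an illustrative sketch with
an invented name; unsigned __int128 models the widening multiply). Note that
a[i]*digit + r[i] + cy cannot overflow 128 bits, because
(2^64-1)^2 + 2*(2^64-1) = 2^128 - 1; that is why the assembly can fold the
carry of each 64-bit addq into %rdx with a single adcq $0.

    #include <stdint.h>

    /* r[i] = lo64(r[i] + a[i] * digit + cy), carrying hi64 onward.
     * Returns the final carry, like s_mpv_mul_add_vec64 above. */
    static uint64_t
    mul_add_vec64_ref(uint64_t *r, const uint64_t *a, int len, uint64_t digit)
    {
        uint64_t cy = 0;
        for (int i = 0; i < len; i++) {
            unsigned __int128 p =
                (unsigned __int128)a[i] * digit + r[i] + cy;
            r[i] = (uint64_t)p;             /* store low 64 bits      */
            cy = (uint64_t)(p >> 64);       /* propagate high 64 bits */
        }
        return cy;
    }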