1. update clientset, deepcopy using code-generator

2. add a dummy file tools.go to force "go mod vendor" to treat
code-generator as a dependency
3. add a script to update CRD
4. add a README to document CRD updating steps
run go mod tidy
update README
This commit is contained in:
xiangqian
2019-12-03 01:22:21 -08:00
parent 90533183e4
commit 728e29aa7e
1128 changed files with 167705 additions and 5135 deletions

View File

@@ -0,0 +1,134 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// MOVDDUP X2, X3
#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA
// MOVDDUP X4, X5
#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC
// MOVDDUP X6, X7
#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE
// MOVDDUP X8, X9
#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8
// ADDSUBPD X2, X3
#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
// ADDSUBPD X4, X5
#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
// ADDSUBPD X6, X7
#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
// ADDSUBPD X8, X9
#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
// AxpyInc computes y[i] = alpha*x[i] + y[i] for n complex128 elements,
// reading x from x[ix] with stride incX and y from y[iy] with stride incY.
//
// Register roles: SI = &x[i]; DI = write pointer into y; DX = read
// pointer into y (kept separate so loads run ahead of stores);
// R8 = incX*sizeof(complex128); R9 = incY*sizeof(complex128);
// BX = unrolled-loop trip count; CX = tail element count;
// X0/X1 (and copies X10/X11) hold alpha in both lane orders.
//
// func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr)
TEXT ·AxpyInc(SB), NOSPLIT, $0
MOVQ x_base+16(FP), SI // SI = &x
MOVQ y_base+40(FP), DI // DI = &y
MOVQ n+64(FP), CX // CX = n
CMPQ CX, $0 // if n==0 { return }
JE axpyi_end
MOVQ ix+88(FP), R8 // R8 = ix // Load the first index
SHLQ $4, R8 // R8 *= sizeof(complex128)
MOVQ iy+96(FP), R9 // R9 = iy
SHLQ $4, R9 // R9 *= sizeof(complex128)
LEAQ (SI)(R8*1), SI // SI = &(x[ix])
LEAQ (DI)(R9*1), DI // DI = &(y[iy])
MOVQ DI, DX // DX = DI // Separate Read/Write pointers
MOVQ incX+72(FP), R8 // R8 = incX
SHLQ $4, R8 // R8 *= sizeof(complex128)
MOVQ incY+80(FP), R9 // R9 = incY
SHLQ $4, R9 // R9 *= sizeof(complex128)
MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) }
MOVAPS X0, X1
SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) }
MOVAPS X0, X10 // Copy X0 and X1 for pipelining
MOVAPS X1, X11
MOVQ CX, BX
ANDQ $3, CX // CX = n % 4
SHRQ $2, BX // BX = floor( n / 4 )
JZ axpyi_tail // if BX == 0 { goto axpyi_tail }
axpyi_loop: // do {
MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) }
MOVUPS (SI)(R8*1), X4
LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2])
MOVUPS (SI), X6
MOVUPS (SI)(R8*1), X8
// X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_X2_X3
MOVDDUP_X4_X5
MOVDDUP_X6_X7
MOVDDUP_X8_X9
// X_i = { imag(x[i]), imag(x[i]) }
SHUFPD $0x3, X2, X2
SHUFPD $0x3, X4, X4
SHUFPD $0x3, X6, X6
SHUFPD $0x3, X8, X8
// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
MULPD X1, X2
MULPD X0, X3
MULPD X11, X4
MULPD X10, X5
MULPD X1, X6
MULPD X0, X7
MULPD X11, X8
MULPD X10, X9
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDSUBPD_X4_X5
ADDSUBPD_X6_X7
ADDSUBPD_X8_X9
// X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
ADDPD (DX), X3
ADDPD (DX)(R9*1), X5
LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2])
ADDPD (DX), X7
ADDPD (DX)(R9*1), X9
MOVUPS X3, (DI) // y[i] = X_(i+1)
MOVUPS X5, (DI)(R9*1)
LEAQ (DI)(R9*2), DI
MOVUPS X7, (DI)
MOVUPS X9, (DI)(R9*1)
LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2])
LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2])
LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2])
DECQ BX
JNZ axpyi_loop // } while --BX > 0
CMPQ CX, $0 // if CX == 0 { return }
JE axpyi_end
axpyi_tail: // do {
MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) }
MOVDDUP_X2_X3 // X_(i+1) = { real(x[i]), real(x[i]) }
SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) }
MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
// X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
ADDPD (DI), X3
MOVUPS X3, (DI) // y[i] = X_(i+1)
ADDQ R8, SI // SI = &(SI[incX])
ADDQ R9, DI // DI = &(DI[incY])
LOOP axpyi_tail // } while --CX > 0
axpyi_end:
RET

View File

@@ -0,0 +1,141 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// MOVDDUP X2, X3
#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA
// MOVDDUP X4, X5
#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC
// MOVDDUP X6, X7
#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE
// MOVDDUP X8, X9
#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8
// ADDSUBPD X2, X3
#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
// ADDSUBPD X4, X5
#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
// ADDSUBPD X6, X7
#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
// ADDSUBPD X8, X9
#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
// AxpyIncTo computes dst[i] = alpha*x[i] + y[i] for n complex128 elements,
// reading x from x[ix] (stride incX), y from y[iy] (stride incY), and
// writing dst from dst[idst] (stride incDst).
//
// Register roles: SI = &x[i]; DX = &y[i]; DI = &dst[i];
// R8/R9/R10 = incX/incY/incDst scaled by sizeof(complex128);
// BX = unrolled-loop trip count; CX = tail element count;
// X0/X1 (copies X10/X11) hold alpha in both lane orders.
//
// func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr)
TEXT ·AxpyIncTo(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DI // DI = &dst
MOVQ x_base+56(FP), SI // SI = &x
MOVQ y_base+80(FP), DX // DX = &y
MOVQ n+104(FP), CX // CX = n
CMPQ CX, $0 // if n==0 { return }
JE axpyi_end
MOVQ ix+128(FP), R8 // R8 = ix // Load the first index
SHLQ $4, R8 // R8 *= sizeof(complex128)
MOVQ iy+136(FP), R9 // R9 = iy
SHLQ $4, R9 // R9 *= sizeof(complex128)
MOVQ idst+32(FP), R10 // R10 = idst
SHLQ $4, R10 // R10 *= sizeof(complex128)
LEAQ (SI)(R8*1), SI // SI = &(x[ix])
LEAQ (DX)(R9*1), DX // DX = &(y[iy])
LEAQ (DI)(R10*1), DI // DI = &(dst[idst])
MOVQ incX+112(FP), R8 // R8 = incX
SHLQ $4, R8 // R8 *= sizeof(complex128)
MOVQ incY+120(FP), R9 // R9 = incY
SHLQ $4, R9 // R9 *= sizeof(complex128)
MOVQ incDst+24(FP), R10 // R10 = incDst
SHLQ $4, R10 // R10 *= sizeof(complex128)
MOVUPS alpha+40(FP), X0 // X0 = { imag(a), real(a) }
MOVAPS X0, X1
SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) }
MOVAPS X0, X10 // Copy X0 and X1 for pipelining
MOVAPS X1, X11
MOVQ CX, BX
ANDQ $3, CX // CX = n % 4
SHRQ $2, BX // BX = floor( n / 4 )
JZ axpyi_tail // if BX == 0 { goto axpyi_tail }
axpyi_loop: // do {
MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) }
MOVUPS (SI)(R8*1), X4
LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2])
MOVUPS (SI), X6
MOVUPS (SI)(R8*1), X8
// X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_X2_X3
MOVDDUP_X4_X5
MOVDDUP_X6_X7
MOVDDUP_X8_X9
// X_i = { imag(x[i]), imag(x[i]) }
SHUFPD $0x3, X2, X2
SHUFPD $0x3, X4, X4
SHUFPD $0x3, X6, X6
SHUFPD $0x3, X8, X8
// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
MULPD X1, X2
MULPD X0, X3
MULPD X11, X4
MULPD X10, X5
MULPD X1, X6
MULPD X0, X7
MULPD X11, X8
MULPD X10, X9
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDSUBPD_X4_X5
ADDSUBPD_X6_X7
ADDSUBPD_X8_X9
// X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
ADDPD (DX), X3
ADDPD (DX)(R9*1), X5
LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2])
ADDPD (DX), X7
ADDPD (DX)(R9*1), X9
MOVUPS X3, (DI) // dst[i] = X_(i+1)
MOVUPS X5, (DI)(R10*1)
LEAQ (DI)(R10*2), DI
MOVUPS X7, (DI)
MOVUPS X9, (DI)(R10*1)
LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2])
LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2])
LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2])
DECQ BX
JNZ axpyi_loop // } while --BX > 0
CMPQ CX, $0 // if CX == 0 { return }
JE axpyi_end
axpyi_tail: // do {
MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) }
MOVDDUP_X2_X3 // X_(i+1) = { real(x[i]), real(x[i]) }
SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) }
MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
// X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
ADDPD (DX), X3
MOVUPS X3, (DI) // dst[i] = X_(i+1)
ADDQ R8, SI // SI += incX
ADDQ R9, DX // DX += incY
ADDQ R10, DI // DI += incDst
LOOP axpyi_tail // } while --CX > 0
axpyi_end:
RET

View File

@@ -0,0 +1,122 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// MOVDDUP X2, X3
#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA
// MOVDDUP X4, X5
#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC
// MOVDDUP X6, X7
#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE
// MOVDDUP X8, X9
#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8
// ADDSUBPD X2, X3
#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
// ADDSUBPD X4, X5
#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
// ADDSUBPD X6, X7
#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
// ADDSUBPD X8, X9
#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
// AxpyUnitary computes y[i] = alpha*x[i] + y[i] for contiguous slices,
// over min(len(x), len(y)) elements.
//
// Register roles: SI = &x, DI = &y, AX = index in float64 (8-byte) units
// (one complex128 = 2 units); BX = unrolled-loop trip count; CX = tail
// element count; X0/X1 (copies X10/X11) hold alpha in both lane orders.
//
// func AxpyUnitary(alpha complex128, x, y []complex128)
TEXT ·AxpyUnitary(SB), NOSPLIT, $0
MOVQ x_base+16(FP), SI // SI = &x
MOVQ y_base+40(FP), DI // DI = &y
MOVQ x_len+24(FP), CX // CX = min( len(x), len(y) )
CMPQ y_len+48(FP), CX
CMOVQLE y_len+48(FP), CX
CMPQ CX, $0 // if CX == 0 { return }
JE caxy_end
PXOR X0, X0 // Clear work registers and cache-align loop
PXOR X1, X1
MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) }
MOVAPS X0, X1
SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) }
XORQ AX, AX // i = 0
MOVAPS X0, X10 // Copy X0 and X1 for pipelining
MOVAPS X1, X11
MOVQ CX, BX
ANDQ $3, CX // CX = n % 4
SHRQ $2, BX // BX = floor( n / 4 )
JZ caxy_tail // if BX == 0 { goto caxy_tail }
caxy_loop: // do {
MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) }
MOVUPS 16(SI)(AX*8), X4
MOVUPS 32(SI)(AX*8), X6
MOVUPS 48(SI)(AX*8), X8
// X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_X2_X3
MOVDDUP_X4_X5
MOVDDUP_X6_X7
MOVDDUP_X8_X9
// X_i = { imag(x[i]), imag(x[i]) }
SHUFPD $0x3, X2, X2
SHUFPD $0x3, X4, X4
SHUFPD $0x3, X6, X6
SHUFPD $0x3, X8, X8
// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
MULPD X1, X2
MULPD X0, X3
MULPD X11, X4
MULPD X10, X5
MULPD X1, X6
MULPD X0, X7
MULPD X11, X8
MULPD X10, X9
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDSUBPD_X4_X5
ADDSUBPD_X6_X7
ADDSUBPD_X8_X9
// X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
ADDPD (DI)(AX*8), X3
ADDPD 16(DI)(AX*8), X5
ADDPD 32(DI)(AX*8), X7
ADDPD 48(DI)(AX*8), X9
MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1)
MOVUPS X5, 16(DI)(AX*8)
MOVUPS X7, 32(DI)(AX*8)
MOVUPS X9, 48(DI)(AX*8)
ADDQ $8, AX // i += 8 (4 complex128 = 8 float64 units)
DECQ BX
JNZ caxy_loop // } while --BX > 0
CMPQ CX, $0 // if CX == 0 { return }
JE caxy_end
caxy_tail: // do {
MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) }
MOVDDUP_X2_X3 // X_(i+1) = { real(x[i]), real(x[i]) }
SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) }
MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
// X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
ADDPD (DI)(AX*8), X3
MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1)
ADDQ $2, AX // i += 2 (one complex128)
LOOP caxy_tail // } while --CX > 0
caxy_end:
RET

View File

@@ -0,0 +1,123 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// MOVDDUP X2, X3
#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA
// MOVDDUP X4, X5
#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC
// MOVDDUP X6, X7
#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE
// MOVDDUP X8, X9
#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8
// ADDSUBPD X2, X3
#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
// ADDSUBPD X4, X5
#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
// ADDSUBPD X6, X7
#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
// ADDSUBPD X8, X9
#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
// AxpyUnitaryTo computes dst[i] = alpha*x[i] + y[i] for contiguous slices,
// over min(len(x), len(y), len(dst)) elements.
//
// NOTE: alpha is complex128 — its 16 bytes sit at 24(FP), which is why
// x_base is read at 40(FP).
//
// Register roles: DI = &dst, SI = &x, DX = &y, AX = index in float64
// (8-byte) units; BX = unrolled-loop trip count; CX = tail element count;
// X0/X1 (copies X10/X11) hold alpha in both lane orders.
//
// func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128)
TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DI // DI = &dst
MOVQ x_base+40(FP), SI // SI = &x
MOVQ y_base+64(FP), DX // DX = &y
MOVQ x_len+48(FP), CX // CX = min( len(x), len(y), len(dst) )
CMPQ y_len+72(FP), CX
CMOVQLE y_len+72(FP), CX
CMPQ dst_len+8(FP), CX
CMOVQLE dst_len+8(FP), CX
CMPQ CX, $0 // if CX == 0 { return }
JE caxy_end
MOVUPS alpha+24(FP), X0 // X0 = { imag(a), real(a) }
MOVAPS X0, X1
SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) }
XORQ AX, AX // i = 0
MOVAPS X0, X10 // Copy X0 and X1 for pipelining
MOVAPS X1, X11
MOVQ CX, BX
ANDQ $3, CX // CX = n % 4
SHRQ $2, BX // BX = floor( n / 4 )
JZ caxy_tail // if BX == 0 { goto caxy_tail }
caxy_loop: // do {
MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) }
MOVUPS 16(SI)(AX*8), X4
MOVUPS 32(SI)(AX*8), X6
MOVUPS 48(SI)(AX*8), X8
// X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_X2_X3 // Load and duplicate imag elements (xi, xi)
MOVDDUP_X4_X5
MOVDDUP_X6_X7
MOVDDUP_X8_X9
// X_i = { imag(x[i]), imag(x[i]) }
SHUFPD $0x3, X2, X2 // duplicate real elements (xr, xr)
SHUFPD $0x3, X4, X4
SHUFPD $0x3, X6, X6
SHUFPD $0x3, X8, X8
// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
MULPD X1, X2
MULPD X0, X3
MULPD X11, X4
MULPD X10, X5
MULPD X1, X6
MULPD X0, X7
MULPD X11, X8
MULPD X10, X9
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDSUBPD_X4_X5
ADDSUBPD_X6_X7
ADDSUBPD_X8_X9
// X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
ADDPD (DX)(AX*8), X3
ADDPD 16(DX)(AX*8), X5
ADDPD 32(DX)(AX*8), X7
ADDPD 48(DX)(AX*8), X9
MOVUPS X3, (DI)(AX*8) // dst[i] = X_(i+1)
MOVUPS X5, 16(DI)(AX*8)
MOVUPS X7, 32(DI)(AX*8)
MOVUPS X9, 48(DI)(AX*8)
ADDQ $8, AX // i += 8 (4 complex128 = 8 float64 units)
DECQ BX
JNZ caxy_loop // } while --BX > 0
CMPQ CX, $0 // if CX == 0 { return }
JE caxy_end
caxy_tail: // Same calculation, but read in values to avoid trampling memory
MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) }
MOVDDUP_X2_X3 // X_(i+1) = { real(x[i]), real(x[i]) }
SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) }
MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
// X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
ADDPD (DX)(AX*8), X3
MOVUPS X3, (DI)(AX*8) // dst[i] = X_(i+1)
ADDQ $2, AX // i += 2 (one complex128)
LOOP caxy_tail // } while --CX > 0
caxy_end:
RET

6
vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go generated vendored Normal file
View File

@@ -0,0 +1,6 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package c128 provides complex128 vector primitives.
//
// The assembly implementations in this package carry the
// !noasm,!appengine,!safe build constraints; pure-Go fallbacks are
// used when any of those tags is set.
package c128 // import "gonum.org/v1/gonum/internal/asm/c128"

View File

@@ -0,0 +1,153 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3
#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5
#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7
#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9
#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2
#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4
#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6
#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8
#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define SUM X0
#define P_SUM X1
#define INC_X R8
#define INCx3_X R9
#define INC_Y R10
#define INCx3_Y R11
#define NEG1 X15
#define P_NEG1 X14
// DotcInc computes the conjugated dot product sum = Σ conj(x[i]) * y[i]
// over n strided complex128 elements, starting at x[ix] and y[iy] with
// strides incX and incY. Conjugation is done by negating imag(x) via
// the NEG1 = { -1, -1 } multiplier before the complex multiply.
//
// func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128)
TEXT ·DotcInc(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
MOVQ n+48(FP), LEN // LEN = n
PXOR SUM, SUM // SUM = 0
CMPQ LEN, $0 // if LEN == 0 { return }
JE dot_end
PXOR P_SUM, P_SUM // P_SUM = 0
MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128)
SHLQ $4, INC_X
MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128)
SHLQ $4, INC_Y
LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix])
LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy])
MOVQ incX+56(FP), INC_X // INC_X = incX
SHLQ $4, INC_X // INC_X *= sizeof(complex128)
MOVQ incY+64(FP), INC_Y // INC_Y = incY
SHLQ $4, INC_Y // INC_Y *= sizeof(complex128)
MOVSD $(-1.0), NEG1
SHUFPD $0, NEG1, NEG1 // NEG1 = { -1, -1 }
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = n % 4
SHRQ $2, LEN // LEN = floor( n / 4 )
JZ dot_tail // if LEN == 0 { goto dot_tail }
MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128)
LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = 3 * incY * sizeof(complex128)
dot_loop: // do {
MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_XPTR_INCX__X5
MOVDDUP_XPTR_INCX_2__X7
MOVDDUP_XPTR_INCx3X__X9
MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) }
MOVDDUP_8_XPTR_INCX__X4
MOVDDUP_8_XPTR_INCX_2__X6
MOVDDUP_8_XPTR_INCx3X__X8
// X_i = { -imag(x[i]), -imag(x[i]) } (conjugate x)
MULPD NEG1, X2
MULPD P_NEG1, X4
MULPD NEG1, X6
MULPD P_NEG1, X8
// X_j = { imag(y[i]), real(y[i]) }
MOVUPS (Y_PTR), X10
MOVUPS (Y_PTR)(INC_Y*1), X11
MOVUPS (Y_PTR)(INC_Y*2), X12
MOVUPS (Y_PTR)(INCx3_Y*1), X13
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
MULPD X10, X3
MULPD X11, X5
MULPD X12, X7
MULPD X13, X9
// X_j = { real(y[i]), imag(y[i]) }
SHUFPD $0x1, X10, X10
SHUFPD $0x1, X11, X11
SHUFPD $0x1, X12, X12
SHUFPD $0x1, X13, X13
// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPD X10, X2
MULPD X11, X4
MULPD X12, X6
MULPD X13, X8
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDSUBPD_X4_X5
ADDSUBPD_X6_X7
ADDSUBPD_X8_X9
// psum += result[i]
ADDPD X3, SUM
ADDPD X5, P_SUM
ADDPD X7, SUM
ADDPD X9, P_SUM
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4])
DECQ LEN
JNZ dot_loop // } while --LEN > 0
ADDPD P_SUM, SUM // sum += psum
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dot_end
dot_tail: // do {
MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) }
MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) }
MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) }
MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) }
MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDPD X3, SUM // sum += result[i]
ADDQ INC_X, X_PTR // X_PTR += incX
ADDQ INC_Y, Y_PTR // Y_PTR += incY
DECQ TAIL
JNZ dot_tail // } while --TAIL > 0
dot_end:
MOVUPS SUM, sum+88(FP)
RET

View File

@@ -0,0 +1,143 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3
#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5
#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7
#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9
#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2
#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4
#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6
#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8
#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define SUM X0
#define P_SUM X1
#define IDX AX
#define I_IDX DX
#define NEG1 X15
#define P_NEG1 X14
// DotcUnitary computes the conjugated dot product sum = Σ conj(x[i]) * y[i]
// over min(len(x), len(y)) contiguous complex128 elements. IDX indexes
// the real parts and I_IDX the imaginary parts (both in float64 units);
// conjugation is done by negating imag(x) via NEG1 = { -1, -1 }.
//
// func DotcUnitary(x, y []complex128) (sum complex128)
TEXT ·DotcUnitary(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) )
CMPQ y_len+32(FP), LEN
CMOVQLE y_len+32(FP), LEN
PXOR SUM, SUM // sum = 0
CMPQ LEN, $0 // if LEN == 0 { return }
JE dot_end
XORPS P_SUM, P_SUM // psum = 0
MOVSD $(-1.0), NEG1
SHUFPD $0, NEG1, NEG1 // NEG1 = { -1, -1 }
XORQ IDX, IDX // i := 0
MOVQ $1, I_IDX // j := 1
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4
SHRQ $2, LEN // LEN = floor( LEN / 4 )
JZ dot_tail // if LEN == 0 { goto dot_tail }
MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining
dot_loop: // do {
MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_16_XPTR_IDX_8__X5
MOVDDUP_32_XPTR_IDX_8__X7
MOVDDUP_48_XPTR_IDX_8__X9
MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) }
MOVDDUP_16_XPTR_IIDX_8__X4
MOVDDUP_32_XPTR_IIDX_8__X6
MOVDDUP_48_XPTR_IIDX_8__X8
// X_i = { -imag(x[i]), -imag(x[i]) } (conjugate x)
MULPD NEG1, X2
MULPD P_NEG1, X4
MULPD NEG1, X6
MULPD P_NEG1, X8
// X_j = { imag(y[i]), real(y[i]) }
MOVUPS (Y_PTR)(IDX*8), X10
MOVUPS 16(Y_PTR)(IDX*8), X11
MOVUPS 32(Y_PTR)(IDX*8), X12
MOVUPS 48(Y_PTR)(IDX*8), X13
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
MULPD X10, X3
MULPD X11, X5
MULPD X12, X7
MULPD X13, X9
// X_j = { real(y[i]), imag(y[i]) }
SHUFPD $0x1, X10, X10
SHUFPD $0x1, X11, X11
SHUFPD $0x1, X12, X12
SHUFPD $0x1, X13, X13
// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPD X10, X2
MULPD X11, X4
MULPD X12, X6
MULPD X13, X8
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDSUBPD_X4_X5
ADDSUBPD_X6_X7
ADDSUBPD_X8_X9
// psum += result[i]
ADDPD X3, SUM
ADDPD X5, P_SUM
ADDPD X7, SUM
ADDPD X9, P_SUM
ADDQ $8, IDX // IDX += 8 (4 complex128 = 8 float64 units)
ADDQ $8, I_IDX // I_IDX += 8
DECQ LEN
JNZ dot_loop // } while --LEN > 0
ADDPD P_SUM, SUM // sum += psum
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dot_end
dot_tail: // do {
MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]) , real(x[i]) }
MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) }
MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) }
MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) }
MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) }
MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDPD X3, SUM // SUM += result[i]
ADDQ $2, IDX // IDX += 2 (one complex128)
ADDQ $2, I_IDX // I_IDX += 2
DECQ TAIL
JNZ dot_tail // } while --TAIL > 0
dot_end:
MOVUPS SUM, sum+48(FP)
RET

View File

@@ -0,0 +1,141 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3
#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5
#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7
#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9
#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2
#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4
#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6
#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8
#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define SUM X0
#define P_SUM X1
#define INC_X R8
#define INCx3_X R9
#define INC_Y R10
#define INCx3_Y R11
// DotuInc computes the unconjugated dot product sum = Σ x[i] * y[i]
// over n strided complex128 elements, starting at x[ix] and y[iy] with
// strides incX and incY.
//
// func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128)
TEXT ·DotuInc(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
MOVQ n+48(FP), LEN // LEN = n
PXOR SUM, SUM // sum = 0
CMPQ LEN, $0 // if LEN == 0 { return }
JE dot_end
MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128)
SHLQ $4, INC_X
MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128)
SHLQ $4, INC_Y
LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix])
LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy])
MOVQ incX+56(FP), INC_X // INC_X = incX
SHLQ $4, INC_X // INC_X *= sizeof(complex128)
MOVQ incY+64(FP), INC_Y // INC_Y = incY
SHLQ $4, INC_Y // INC_Y *= sizeof(complex128)
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4
SHRQ $2, LEN // LEN = floor( LEN / 4 )
JZ dot_tail // if LEN == 0 { goto dot_tail }
PXOR P_SUM, P_SUM // psum = 0
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128)
LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = 3 * incY * sizeof(complex128)
dot_loop: // do {
MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_XPTR_INCX__X5
MOVDDUP_XPTR_INCX_2__X7
MOVDDUP_XPTR_INCx3X__X9
MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) }
MOVDDUP_8_XPTR_INCX__X4
MOVDDUP_8_XPTR_INCX_2__X6
MOVDDUP_8_XPTR_INCx3X__X8
// X_j = { imag(y[i]), real(y[i]) }
MOVUPS (Y_PTR), X10
MOVUPS (Y_PTR)(INC_Y*1), X11
MOVUPS (Y_PTR)(INC_Y*2), X12
MOVUPS (Y_PTR)(INCx3_Y*1), X13
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
MULPD X10, X3
MULPD X11, X5
MULPD X12, X7
MULPD X13, X9
// X_j = { real(y[i]), imag(y[i]) }
SHUFPD $0x1, X10, X10
SHUFPD $0x1, X11, X11
SHUFPD $0x1, X12, X12
SHUFPD $0x1, X13, X13
// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPD X10, X2
MULPD X11, X4
MULPD X12, X6
MULPD X13, X8
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDSUBPD_X4_X5
ADDSUBPD_X6_X7
ADDSUBPD_X8_X9
// psum += result[i]
ADDPD X3, SUM
ADDPD X5, P_SUM
ADDPD X7, SUM
ADDPD X9, P_SUM
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4])
DECQ LEN
JNZ dot_loop // } while --LEN > 0
ADDPD P_SUM, SUM // sum += psum
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dot_end
dot_tail: // do {
MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) }
MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) }
MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) }
MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDPD X3, SUM // sum += result[i]
ADDQ INC_X, X_PTR // X_PTR += incX
ADDQ INC_Y, Y_PTR // Y_PTR += incY
DECQ TAIL // --TAIL
JNZ dot_tail // } while TAIL > 0
dot_end:
MOVUPS SUM, sum+88(FP)
RET

View File

@@ -0,0 +1,130 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3
#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5
#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7
#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9
#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2
#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4
#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6
#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8
#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define SUM X0
#define P_SUM X1
#define IDX AX
#define I_IDX DX
// DotuUnitary computes the unconjugated dot product sum = Σ x[i] * y[i]
// over min(len(x), len(y)) contiguous complex128 elements. IDX indexes
// the real parts and I_IDX the imaginary parts (both in float64 units).
//
// func DotuUnitary(x, y []complex128) (sum complex128)
TEXT ·DotuUnitary(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) )
CMPQ y_len+32(FP), LEN
CMOVQLE y_len+32(FP), LEN
PXOR SUM, SUM // SUM = 0
CMPQ LEN, $0 // if LEN == 0 { return }
JE dot_end
PXOR P_SUM, P_SUM // P_SUM = 0
XORQ IDX, IDX // IDX = 0
MOVQ $1, DX // I_IDX = 1 (imag parts start one float64 in)
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4
SHRQ $2, LEN // LEN = floor( LEN / 4 )
JZ dot_tail // if LEN == 0 { goto dot_tail }
dot_loop: // do {
MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]), real(x[i]) }
MOVDDUP_16_XPTR_IDX_8__X5
MOVDDUP_32_XPTR_IDX_8__X7
MOVDDUP_48_XPTR_IDX_8__X9
MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) }
MOVDDUP_16_XPTR_IIDX_8__X4
MOVDDUP_32_XPTR_IIDX_8__X6
MOVDDUP_48_XPTR_IIDX_8__X8
// X_j = { imag(y[i]), real(y[i]) }
MOVUPS (Y_PTR)(IDX*8), X10
MOVUPS 16(Y_PTR)(IDX*8), X11
MOVUPS 32(Y_PTR)(IDX*8), X12
MOVUPS 48(Y_PTR)(IDX*8), X13
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
MULPD X10, X3
MULPD X11, X5
MULPD X12, X7
MULPD X13, X9
// X_j = { real(y[i]), imag(y[i]) }
SHUFPD $0x1, X10, X10
SHUFPD $0x1, X11, X11
SHUFPD $0x1, X12, X12
SHUFPD $0x1, X13, X13
// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPD X10, X2
MULPD X11, X4
MULPD X12, X6
MULPD X13, X8
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDSUBPD_X4_X5
ADDSUBPD_X6_X7
ADDSUBPD_X8_X9
// psum += result[i]
ADDPD X3, SUM
ADDPD X5, P_SUM
ADDPD X7, SUM
ADDPD X9, P_SUM
ADDQ $8, IDX // IDX += 8 (4 complex128 = 8 float64 units)
ADDQ $8, I_IDX // I_IDX += 8
DECQ LEN
JNZ dot_loop // } while --LEN > 0
ADDPD P_SUM, SUM // SUM += P_SUM
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dot_end
dot_tail: // do {
MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]) , real(x[i]) }
MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) }
MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) }
MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) }
MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
// X_(i+1) = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
// }
ADDSUBPD_X2_X3
ADDPD X3, SUM // psum += result[i]
ADDQ $2, IDX // IDX += 2 (one complex128)
ADDQ $2, I_IDX // I_IDX += 2
DECQ TAIL // --TAIL
JNZ dot_tail // } while TAIL > 0
dot_end:
MOVUPS SUM, sum+48(FP)
RET

View File

@@ -0,0 +1,69 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define SRC SI // read pointer
#define DST SI // write pointer; aliases SRC because the scaling is in place
#define LEN CX
#define TAIL BX
#define INC R9
#define INC3 R10
#define ALPHA X0
#define ALPHA_2 X1
// alpha (float64, first arg) lives at 0(FP), which is 8(SP) in this
// frameless NOSPLIT function (0(SP) holds the return address).
#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0
// DscalInc scales both halves of n strided complex128 elements of x by the
// real scalar alpha, in place, four elements per unrolled iteration:
//  x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha); ix += inc
// func DscalInc(alpha float64, x []complex128, n, inc uintptr)
TEXT ·DscalInc(SB), NOSPLIT, $0
	MOVQ x_base+8(FP), SRC // SRC = &x
	MOVQ n+32(FP), LEN     // LEN = n
	CMPQ LEN, $0           // if LEN == 0 { return }
	JE   dscal_end

	MOVDDUP_ALPHA // ALPHA = { alpha, alpha }

	MOVQ inc+40(FP), INC    // INC = inc
	SHLQ $4, INC            // INC = INC * sizeof(complex128)
	LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC

	MOVUPS ALPHA, ALPHA_2 // Copy ALPHA to ALPHA_2 for pipelining
	MOVQ   LEN, TAIL      // TAIL = LEN
	SHRQ   $2, LEN        // LEN = floor( n / 4 )
	JZ     dscal_tail     // if LEN == 0 { goto dscal_tail }

dscal_loop: // do {
	MOVUPS (SRC), X2 // X_i = x[i]
	MOVUPS (SRC)(INC*1), X3
	MOVUPS (SRC)(INC*2), X4
	MOVUPS (SRC)(INC3*1), X5

	MULPD ALPHA, X2 // X_i *= ALPHA
	MULPD ALPHA_2, X3
	MULPD ALPHA, X4
	MULPD ALPHA_2, X5

	MOVUPS X2, (DST) // x[i] = X_i
	MOVUPS X3, (DST)(INC*1)
	MOVUPS X4, (DST)(INC*2)
	MOVUPS X5, (DST)(INC3*1)

	LEAQ (SRC)(INC*4), SRC // SRC += INC*4
	DECQ LEN
	JNZ  dscal_loop // } while --LEN > 0

dscal_tail:
	ANDQ $3, TAIL  // TAIL = TAIL % 4
	JE   dscal_end // if TAIL == 0 { return }

dscal_tail_loop: // do {
	MOVUPS (SRC), X2 // X_i = x[i]
	MULPD  ALPHA, X2 // X_i *= ALPHA
	MOVUPS X2, (DST) // x[i] = X_i
	ADDQ   INC, SRC  // SRC += INC
	DECQ   TAIL
	JNZ    dscal_tail_loop // } while --TAIL > 0

dscal_end:
	RET

View File

@@ -0,0 +1,66 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define SRC SI // read pointer
#define DST SI // write pointer; aliases SRC because the scaling is in place
#define LEN CX
#define IDX AX // index in 8-byte units (2 per complex128)
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1
// alpha (float64, first arg) lives at 0(FP), which is 8(SP) in this
// frameless NOSPLIT function (0(SP) holds the return address).
#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0
// DscalUnitary scales both halves of every complex128 element of x by the
// real scalar alpha, in place, four elements per unrolled iteration:
//  x[i] = complex(real(v)*alpha, imag(v)*alpha)
// func DscalUnitary(alpha float64, x []complex128)
TEXT ·DscalUnitary(SB), NOSPLIT, $0
	MOVQ x_base+8(FP), SRC  // SRC = &x
	MOVQ x_len+16(FP), LEN  // LEN = len(x)
	CMPQ LEN, $0            // if LEN == 0 { return }
	JE   dscal_end

	MOVDDUP_ALPHA // ALPHA = { alpha, alpha }

	XORQ   IDX, IDX       // IDX = 0
	MOVUPS ALPHA, ALPHA_2 // Copy ALPHA to ALPHA_2 for pipelining
	MOVQ   LEN, TAIL      // TAIL = LEN
	SHRQ   $2, LEN        // LEN = floor( n / 4 )
	JZ     dscal_tail     // if LEN == 0 { goto dscal_tail }

dscal_loop: // do {
	MOVUPS (SRC)(IDX*8), X2 // X_i = x[i]
	MOVUPS 16(SRC)(IDX*8), X3
	MOVUPS 32(SRC)(IDX*8), X4
	MOVUPS 48(SRC)(IDX*8), X5

	MULPD ALPHA, X2 // X_i *= ALPHA
	MULPD ALPHA_2, X3
	MULPD ALPHA, X4
	MULPD ALPHA_2, X5

	MOVUPS X2, (DST)(IDX*8) // x[i] = X_i
	MOVUPS X3, 16(DST)(IDX*8)
	MOVUPS X4, 32(DST)(IDX*8)
	MOVUPS X5, 48(DST)(IDX*8)

	ADDQ $8, IDX // IDX += 8
	DECQ LEN
	JNZ  dscal_loop // } while --LEN > 0

dscal_tail:
	ANDQ $3, TAIL  // TAIL = TAIL % 4
	JZ   dscal_end // if TAIL == 0 { return }

dscal_tail_loop: // do {
	MOVUPS (SRC)(IDX*8), X2 // X_i = x[i]
	MULPD  ALPHA, X2        // X_i *= ALPHA
	MOVUPS X2, (DST)(IDX*8) // x[i] = X_i
	ADDQ   $2, IDX          // IDX += 2
	DECQ   TAIL
	JNZ    dscal_tail_loop // } while --TAIL > 0

dscal_end:
	RET

31
vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go generated vendored Normal file
View File

@@ -0,0 +1,31 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package c128
// ScalUnitaryTo is
// for i, v := range x {
// dst[i] = alpha * v
// }
func ScalUnitaryTo(dst []complex128, alpha complex128, x []complex128) {
	// Write alpha*x[i] into the corresponding slot of dst.
	for i := range x {
		dst[i] = alpha * x[i]
	}
}
// ScalIncTo is
// var idst, ix uintptr
// for i := 0; i < int(n); i++ {
// dst[idst] = alpha * x[ix]
// ix += incX
// idst += incDst
// }
func ScalIncTo(dst []complex128, incDst uintptr, alpha complex128, x []complex128, n, incX uintptr) {
	// Walk both slices with independent strides, scaling as we go.
	var dstIdx, xIdx uintptr
	for count := n; count > 0; count-- {
		dst[dstIdx] = alpha * x[xIdx]
		xIdx += incX
		dstIdx += incDst
	}
}

View File

@@ -0,0 +1,116 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define SRC SI // read pointer
#define DST SI // write pointer; aliases SRC because the scaling is in place
#define LEN CX
#define IDX AX // index in 8-byte units (2 per complex128)
#define TAIL BX
#define ALPHA X0   // { imag(alpha), real(alpha) }
#define ALPHA_C X1 // { real(alpha), imag(alpha) }
#define ALPHA2 X10   // copy of ALPHA for pipelining
#define ALPHA_C2 X11 // copy of ALPHA_C for pipelining
#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3
#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5
#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7
#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9
#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
// ScalUnitary multiplies every element of x by the complex scalar alpha,
// in place, four complex128 elements per unrolled iteration:
//  for i := range x { x[i] *= alpha }
// func ScalUnitary(alpha complex128, x []complex128)
TEXT ·ScalUnitary(SB), NOSPLIT, $0
	MOVQ x_base+16(FP), SRC // SRC = &x
	MOVQ x_len+24(FP), LEN  // LEN = len(x)
	CMPQ LEN, $0            // if LEN == 0 { return }
	JE   scal_end

	MOVUPS alpha+0(FP), ALPHA     // ALPHA = { imag(alpha), real(alpha) }
	MOVAPS ALPHA, ALPHA_C
	SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) }

	XORQ   IDX, IDX        // IDX = 0
	MOVAPS ALPHA, ALPHA2   // Copy ALPHA and ALPHA_C for pipelining
	MOVAPS ALPHA_C, ALPHA_C2
	MOVQ   LEN, TAIL
	SHRQ   $2, LEN   // LEN = floor( n / 4 )
	JZ     scal_tail // if LEN == 0 { goto scal_tail }

scal_loop: // do {
	MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) }
	MOVUPS 16(SRC)(IDX*8), X4
	MOVUPS 32(SRC)(IDX*8), X6
	MOVUPS 48(SRC)(IDX*8), X8

	// X_(i+1) = { real(x[i]), real(x[i]) }
	MOVDDUP_X2_X3
	MOVDDUP_X4_X5
	MOVDDUP_X6_X7
	MOVDDUP_X8_X9

	// X_i = { imag(x[i]), imag(x[i]) }
	SHUFPD $0x3, X2, X2
	SHUFPD $0x3, X4, X4
	SHUFPD $0x3, X6, X6
	SHUFPD $0x3, X8, X8

	// X_i     = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) }
	// X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) }
	MULPD ALPHA_C, X2
	MULPD ALPHA, X3
	MULPD ALPHA_C2, X4
	MULPD ALPHA2, X5
	MULPD ALPHA_C, X6
	MULPD ALPHA, X7
	MULPD ALPHA_C2, X8
	MULPD ALPHA2, X9

	// X_(i+1) = {
	//	imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]),
	//	real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i])
	// }
	ADDSUBPD_X2_X3
	ADDSUBPD_X4_X5
	ADDSUBPD_X6_X7
	ADDSUBPD_X8_X9

	MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1)
	MOVUPS X5, 16(DST)(IDX*8)
	MOVUPS X7, 32(DST)(IDX*8)
	MOVUPS X9, 48(DST)(IDX*8)
	ADDQ   $8, IDX // IDX += 8
	DECQ   LEN
	JNZ    scal_loop // } while --LEN > 0

scal_tail:
	ANDQ $3, TAIL // TAIL = TAIL % 4
	JZ   scal_end // if TAIL == 0 { return }

scal_tail_loop: // do {
	MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) }
	MOVDDUP_X2_X3           // X_(i+1) = { real(x[i]), real(x[i]) }
	SHUFPD $0x3, X2, X2     // X_i = { imag(x[i]), imag(x[i]) }
	MULPD  ALPHA_C, X2      // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) }
	MULPD  ALPHA, X3        // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) }

	// X_(i+1) = {
	//	imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]),
	//	real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i])
	// }
	ADDSUBPD_X2_X3

	MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1)
	ADDQ   $2, IDX          // IDX += 2
	DECQ   TAIL
	JNZ    scal_tail_loop // } while --TAIL > 0

scal_end:
	RET

View File

@@ -0,0 +1,121 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define SRC SI // read pointer
#define DST SI // write pointer; aliases SRC because the scaling is in place
#define LEN CX
#define TAIL BX
#define INC R9
#define INC3 R10
#define ALPHA X0   // { imag(alpha), real(alpha) }
#define ALPHA_C X1 // { real(alpha), imag(alpha) }
#define ALPHA2 X10   // copy of ALPHA for pipelining
#define ALPHA_C2 X11 // copy of ALPHA_C for pipelining
#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3
#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5
#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7
#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9
#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
// ScalInc multiplies n strided elements of x by the complex scalar alpha,
// in place, four complex128 elements per unrolled iteration:
//  x[ix] *= alpha; ix += inc
// func ScalInc(alpha complex128, x []complex128, n, inc uintptr)
TEXT ·ScalInc(SB), NOSPLIT, $0
	MOVQ x_base+16(FP), SRC // SRC = &x
	MOVQ n+40(FP), LEN      // LEN = n
	CMPQ LEN, $0
	JE   scal_end // if LEN == 0 { return }

	MOVQ inc+48(FP), INC    // INC = inc
	SHLQ $4, INC            // INC = INC * sizeof(complex128)
	LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC

	MOVUPS alpha+0(FP), ALPHA     // ALPHA = { imag(alpha), real(alpha) }
	MOVAPS ALPHA, ALPHA_C
	SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) }

	MOVAPS ALPHA, ALPHA2 // Copy ALPHA and ALPHA_C for pipelining
	MOVAPS ALPHA_C, ALPHA_C2
	MOVQ   LEN, TAIL
	SHRQ   $2, LEN   // LEN = floor( n / 4 )
	JZ     scal_tail // if LEN == 0 { goto scal_tail }

scal_loop: // do {
	MOVUPS (SRC), X2 // X_i = { imag(x[i]), real(x[i]) }
	MOVUPS (SRC)(INC*1), X4
	MOVUPS (SRC)(INC*2), X6
	MOVUPS (SRC)(INC3*1), X8

	// X_(i+1) = { real(x[i]), real(x[i]) }
	MOVDDUP_X2_X3
	MOVDDUP_X4_X5
	MOVDDUP_X6_X7
	MOVDDUP_X8_X9

	// X_i = { imag(x[i]), imag(x[i]) }
	SHUFPD $0x3, X2, X2
	SHUFPD $0x3, X4, X4
	SHUFPD $0x3, X6, X6
	SHUFPD $0x3, X8, X8

	// X_i     = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) }
	// X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) }
	MULPD ALPHA_C, X2
	MULPD ALPHA, X3
	MULPD ALPHA_C2, X4
	MULPD ALPHA2, X5
	MULPD ALPHA_C, X6
	MULPD ALPHA, X7
	MULPD ALPHA_C2, X8
	MULPD ALPHA2, X9

	// X_(i+1) = {
	//	imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]),
	//	real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i])
	// }
	ADDSUBPD_X2_X3
	ADDSUBPD_X4_X5
	ADDSUBPD_X6_X7
	ADDSUBPD_X8_X9

	MOVUPS X3, (DST) // x[i] = X_(i+1)
	MOVUPS X5, (DST)(INC*1)
	MOVUPS X7, (DST)(INC*2)
	MOVUPS X9, (DST)(INC3*1)
	LEAQ   (SRC)(INC*4), SRC // SRC = &(SRC[inc*4])
	DECQ   LEN
	JNZ    scal_loop // } while --LEN > 0

scal_tail:
	ANDQ $3, TAIL // TAIL = TAIL % 4
	JE   scal_end // if TAIL == 0 { return }

scal_tail_loop: // do {
	MOVUPS (SRC), X2    // X_i = { imag(x[i]), real(x[i]) }
	MOVDDUP_X2_X3       // X_(i+1) = { real(x[i]), real(x[i]) }
	SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) }
	MULPD  ALPHA_C, X2  // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) }
	MULPD  ALPHA, X3    // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) }

	// X_(i+1) = {
	//	imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]),
	//	real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i])
	// }
	ADDSUBPD_X2_X3

	MOVUPS X3, (DST) // x[i] = X_(i+1)
	ADDQ   INC, SRC  // SRC = &(SRC[inc])
	DECQ   TAIL
	JNZ    scal_tail_loop // } while --TAIL > 0

scal_end:
	RET

View File

@@ -0,0 +1,96 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package c128
// The functions below are declarations only; their bodies are the
// assembly implementations in the .s files of this package.
// AxpyUnitary is
//  for i, v := range x {
//  	y[i] += alpha * v
//  }
func AxpyUnitary(alpha complex128, x, y []complex128)
// AxpyUnitaryTo is
//  for i, v := range x {
//  	dst[i] = alpha*v + y[i]
//  }
func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128)
// AxpyInc is
//  for i := 0; i < int(n); i++ {
//  	y[iy] += alpha * x[ix]
//  	ix += incX
//  	iy += incY
//  }
func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr)
// AxpyIncTo is
//  for i := 0; i < int(n); i++ {
//  	dst[idst] = alpha*x[ix] + y[iy]
//  	ix += incX
//  	iy += incY
//  	idst += incDst
//  }
func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr)
// DscalUnitary is
//  for i, v := range x {
//  	x[i] = complex(real(v)*alpha, imag(v)*alpha)
//  }
func DscalUnitary(alpha float64, x []complex128)
// DscalInc is
//  var ix uintptr
//  for i := 0; i < int(n); i++ {
//  	x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha)
//  	ix += inc
//  }
func DscalInc(alpha float64, x []complex128, n, inc uintptr)
// ScalInc is
//  var ix uintptr
//  for i := 0; i < int(n); i++ {
//  	x[ix] *= alpha
//  	ix += incX
//  }
func ScalInc(alpha complex128, x []complex128, n, inc uintptr)
// ScalUnitary is
//  for i := range x {
//  	x[i] *= alpha
//  }
func ScalUnitary(alpha complex128, x []complex128)
// DotcUnitary is
//  for i, v := range x {
//  	sum += y[i] * cmplx.Conj(v)
//  }
//  return sum
func DotcUnitary(x, y []complex128) (sum complex128)
// DotcInc is
//  for i := 0; i < int(n); i++ {
//  	sum += y[iy] * cmplx.Conj(x[ix])
//  	ix += incX
//  	iy += incY
//  }
//  return sum
func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128)
// DotuUnitary is
//  for i, v := range x {
//  	sum += y[i] * v
//  }
//  return sum
func DotuUnitary(x, y []complex128) (sum complex128)
// DotuInc is
//  for i := 0; i < int(n); i++ {
//  	sum += y[iy] * x[ix]
//  	ix += incX
//  	iy += incY
//  }
//  return sum
func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128)

View File

@@ -0,0 +1,163 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package c128
import "math/cmplx"
// AxpyUnitary is
// for i, v := range x {
// y[i] += alpha * v
// }
func AxpyUnitary(alpha complex128, x, y []complex128) {
	// Accumulate alpha*x[i] into y element-wise.
	for i := range x {
		y[i] += alpha * x[i]
	}
}
// AxpyUnitaryTo is
// for i, v := range x {
// dst[i] = alpha*v + y[i]
// }
func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) {
	// dst receives alpha*x[i] + y[i] element-wise.
	for i := range x {
		dst[i] = alpha*x[i] + y[i]
	}
}
// AxpyInc is
// for i := 0; i < int(n); i++ {
// y[iy] += alpha * x[ix]
// ix += incX
// iy += incY
// }
func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) {
	// Strided axpy: y[iy] += alpha*x[ix] for n steps.
	for count := n; count > 0; count-- {
		y[iy] += alpha * x[ix]
		ix += incX
		iy += incY
	}
}
// AxpyIncTo is
// for i := 0; i < int(n); i++ {
// dst[idst] = alpha*x[ix] + y[iy]
// ix += incX
// iy += incY
// idst += incDst
// }
func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) {
	// Strided axpy into dst: dst[idst] = alpha*x[ix] + y[iy] for n steps.
	for count := n; count > 0; count-- {
		dst[idst] = alpha*x[ix] + y[iy]
		ix += incX
		iy += incY
		idst += incDst
	}
}
// DscalUnitary is
// for i, v := range x {
// x[i] = complex(real(v)*alpha, imag(v)*alpha)
// }
func DscalUnitary(alpha float64, x []complex128) {
	// Scale both real and imaginary parts by the real scalar alpha.
	for i := range x {
		x[i] = complex(real(x[i])*alpha, imag(x[i])*alpha)
	}
}
// DscalInc is
// var ix uintptr
// for i := 0; i < int(n); i++ {
// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha)
// ix += inc
// }
func DscalInc(alpha float64, x []complex128, n, inc uintptr) {
	// Strided real scaling of complex values.
	var idx uintptr
	for count := n; count > 0; count-- {
		x[idx] = complex(alpha*real(x[idx]), alpha*imag(x[idx]))
		idx += inc
	}
}
// ScalInc is
// var ix uintptr
// for i := 0; i < int(n); i++ {
// x[ix] *= alpha
// ix += incX
// }
func ScalInc(alpha complex128, x []complex128, n, inc uintptr) {
	// Strided complex scaling in place.
	var idx uintptr
	for count := n; count > 0; count-- {
		x[idx] *= alpha
		idx += inc
	}
}
// ScalUnitary is
// for i := range x {
// x[i] *= alpha
// }
func ScalUnitary(alpha complex128, x []complex128) {
	// Multiply every element by alpha, in place.
	for i, v := range x {
		x[i] = alpha * v
	}
}
// DotcUnitary is
// for i, v := range x {
// sum += y[i] * cmplx.Conj(v)
// }
// return sum
func DotcUnitary(x, y []complex128) (sum complex128) {
	// Conjugated dot product: sum of y[i] * conj(x[i]).
	for i := range x {
		sum += y[i] * cmplx.Conj(x[i])
	}
	return sum
}
// DotcInc is
// for i := 0; i < int(n); i++ {
// sum += y[iy] * cmplx.Conj(x[ix])
// ix += incX
// iy += incY
// }
// return sum
func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) {
	// Strided conjugated dot product over n terms.
	for count := n; count > 0; count-- {
		sum += y[iy] * cmplx.Conj(x[ix])
		ix += incX
		iy += incY
	}
	return sum
}
// DotuUnitary is
// for i, v := range x {
// sum += y[i] * v
// }
// return sum
func DotuUnitary(x, y []complex128) (sum complex128) {
	// Unconjugated dot product: sum of x[i] * y[i].
	for i := range x {
		sum += x[i] * y[i]
	}
	return sum
}
// DotuInc is
// for i := 0; i < int(n); i++ {
// sum += y[iy] * x[ix]
// ix += incX
// iy += incY
// }
// return sum
func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) {
	// Strided unconjugated dot product over n terms.
	for count := n; count > 0; count-- {
		sum += x[ix] * y[iy]
		ix += incX
		iy += incY
	}
	return sum
}

View File

@@ -0,0 +1,151 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// MOVSHDUP X3, X2 (duplicates the high/imag float32 of each 64-bit pair)
#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3
// MOVSLDUP X3, X3 (duplicates the low/real float32 of each 64-bit pair)
#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB
// ADDSUBPS X2, X3
#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
// MOVSHDUP X5, X4
#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5
// MOVSLDUP X5, X5
#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED
// ADDSUBPS X4, X5
#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
// MOVSHDUP X7, X6
#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7
// MOVSLDUP X7, X7
#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF
// ADDSUBPS X6, X7
#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
// MOVSHDUP X9, X8
#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1
// MOVSLDUP X9, X9
#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9
// ADDSUBPS X8, X9
#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
// AxpyInc computes y[iy] += alpha * x[ix] with independent strides for x
// and y, four complex64 elements per unrolled iteration.
// func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr)
TEXT ·AxpyInc(SB), NOSPLIT, $0
	MOVQ x_base+8(FP), SI  // SI = &x
	MOVQ y_base+32(FP), DI // DI = &y
	MOVQ n+56(FP), CX      // CX = n
	CMPQ CX, $0            // if n==0 { return }
	JE   axpyi_end
	MOVQ ix+80(FP), R8 // R8 = ix
	MOVQ iy+88(FP), R9 // R9 = iy
	LEAQ (SI)(R8*8), SI // SI = &(x[ix])
	LEAQ (DI)(R9*8), DI // DI = &(y[iy])
	MOVQ DI, DX // DX = DI // Read/Write pointers
	MOVQ incX+64(FP), R8 // R8 = incX
	SHLQ $3, R8          // R8 *= sizeof(complex64)
	MOVQ incY+72(FP), R9 // R9 = incY
	SHLQ $3, R9          // R9 *= sizeof(complex64)
	MOVSD alpha+0(FP), X0 // X0 = { 0, 0, imag(a), real(a) }
	MOVAPS X0, X1
	SHUFPS $0x11, X1, X1 // X1 = { 0, 0, real(a), imag(a) }
	MOVAPS X0, X10 // Copy X0 and X1 for pipelining
	MOVAPS X1, X11
	MOVQ CX, BX
	ANDQ $3, CX // CX = n % 4
	SHRQ $2, BX // BX = floor( n / 4 )
	JZ   axpyi_tail // if BX == 0 { goto axpyi_tail }

axpyi_loop: // do {
	MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) }
	MOVSD (SI)(R8*1), X5
	LEAQ  (SI)(R8*2), SI // SI = &(SI[incX*2])
	MOVSD (SI), X7
	MOVSD (SI)(R8*1), X9

	// X_(i-1) = { imag(x[i]), imag(x[i]) }
	MOVSHDUP_X3_X2
	MOVSHDUP_X5_X4
	MOVSHDUP_X7_X6
	MOVSHDUP_X9_X8

	// X_i = { real(x[i]), real(x[i]) }
	MOVSLDUP_X3_X3
	MOVSLDUP_X5_X5
	MOVSLDUP_X7_X7
	MOVSLDUP_X9_X9

	// X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
	// X_i     = { imag(a) * real(x[i]), real(a) * real(x[i]) }
	MULPS X1, X2
	MULPS X0, X3
	MULPS X11, X4
	MULPS X10, X5
	MULPS X1, X6
	MULPS X0, X7
	MULPS X11, X8
	MULPS X10, X9

	// X_i = {
	//	imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
	//	real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]),
	// }
	ADDSUBPS_X2_X3
	ADDSUBPS_X4_X5
	ADDSUBPS_X6_X7
	ADDSUBPS_X8_X9

	// X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
	MOVSD (DX), X2
	MOVSD (DX)(R9*1), X4
	LEAQ  (DX)(R9*2), DX // DX = &(DX[incY*2])
	MOVSD (DX), X6
	MOVSD (DX)(R9*1), X8
	ADDPS X2, X3
	ADDPS X4, X5
	ADDPS X6, X7
	ADDPS X8, X9

	MOVSD X3, (DI) // y[i] = X_i
	MOVSD X5, (DI)(R9*1)
	LEAQ  (DI)(R9*2), DI // DI = &(DI[incY*2])
	MOVSD X7, (DI)
	MOVSD X9, (DI)(R9*1)
	LEAQ  (SI)(R8*2), SI // SI = &(SI[incX*2])
	LEAQ  (DX)(R9*2), DX // DX = &(DX[incY*2])
	LEAQ  (DI)(R9*2), DI // DI = &(DI[incY*2])
	DECQ  BX
	JNZ   axpyi_loop // } while --BX > 0
	CMPQ  CX, $0     // if CX == 0 { return }
	JE    axpyi_end

axpyi_tail: // do {
	MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) }
	MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) }
	MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) }

	// X_i     = { imag(a) * real(x[i]), real(a) * real(x[i]) }
	// X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
	MULPS X1, X2
	MULPS X0, X3

	// X_i = {
	//	imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
	//	real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]),
	// }
	ADDSUBPS_X2_X3 // (ai*x1r+ar*x1i, ar*x1r-ai*x1i)

	// X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
	MOVSD (DI), X4
	ADDPS X4, X3
	MOVSD X3, (DI) // y[i] = X_i
	ADDQ  R8, SI   // SI += incX
	ADDQ  R9, DI   // DI += incY
	LOOP  axpyi_tail // } while --CX > 0

axpyi_end:
	RET

View File

@@ -0,0 +1,156 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// MOVSHDUP X3, X2 (duplicates the high/imag float32 of each 64-bit pair)
#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3
// MOVSLDUP X3, X3 (duplicates the low/real float32 of each 64-bit pair)
#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB
// ADDSUBPS X2, X3
#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
// MOVSHDUP X5, X4
#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5
// MOVSLDUP X5, X5
#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED
// ADDSUBPS X4, X5
#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
// MOVSHDUP X7, X6
#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7
// MOVSLDUP X7, X7
#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF
// ADDSUBPS X6, X7
#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
// MOVSHDUP X9, X8
#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1
// MOVSLDUP X9, X9
#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9
// ADDSUBPS X8, X9
#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
// AxpyIncTo computes dst[idst] = alpha*x[ix] + y[iy] with independent
// strides for dst, x and y, four complex64 elements per unrolled iteration.
// func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr)
TEXT ·AxpyIncTo(SB), NOSPLIT, $0
	MOVQ dst_base+0(FP), DI // DI = &dst
	MOVQ x_base+48(FP), SI  // SI = &x
	MOVQ y_base+72(FP), DX  // DX = &y
	MOVQ n+96(FP), CX       // CX = n
	CMPQ CX, $0             // if n==0 { return }
	JE   axpyi_end
	MOVQ ix+120(FP), R8 // Load the first index
	MOVQ iy+128(FP), R9
	MOVQ idst+32(FP), R10
	LEAQ (SI)(R8*8), SI   // SI = &(x[ix])
	LEAQ (DX)(R9*8), DX   // DX = &(y[iy])
	LEAQ (DI)(R10*8), DI  // DI = &(dst[idst])
	MOVQ incX+104(FP), R8 // Incrementors*8 for easy iteration (ADDQ)
	SHLQ $3, R8
	MOVQ incY+112(FP), R9
	SHLQ $3, R9
	MOVQ incDst+24(FP), R10
	SHLQ $3, R10
	MOVSD alpha+40(FP), X0 // X0 = { 0, 0, imag(a), real(a) }
	MOVAPS X0, X1
	SHUFPS $0x11, X1, X1 // X1 = { 0, 0, real(a), imag(a) }
	MOVAPS X0, X10 // Copy X0 and X1 for pipelining
	MOVAPS X1, X11
	MOVQ CX, BX
	ANDQ $3, CX // CX = n % 4
	SHRQ $2, BX // BX = floor( n / 4 )
	JZ   axpyi_tail // if BX == 0 { goto axpyi_tail }

axpyi_loop: // do {
	MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) }
	MOVSD (SI)(R8*1), X5
	LEAQ  (SI)(R8*2), SI // SI = &(SI[incX*2])
	MOVSD (SI), X7
	MOVSD (SI)(R8*1), X9

	// X_(i-1) = { imag(x[i]), imag(x[i]) }
	MOVSHDUP_X3_X2
	MOVSHDUP_X5_X4
	MOVSHDUP_X7_X6
	MOVSHDUP_X9_X8

	// X_i = { real(x[i]), real(x[i]) }
	MOVSLDUP_X3_X3
	MOVSLDUP_X5_X5
	MOVSLDUP_X7_X7
	MOVSLDUP_X9_X9

	// X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
	// X_i     = { imag(a) * real(x[i]), real(a) * real(x[i]) }
	MULPS X1, X2
	MULPS X0, X3
	MULPS X11, X4
	MULPS X10, X5
	MULPS X1, X6
	MULPS X0, X7
	MULPS X11, X8
	MULPS X10, X9

	// X_i = {
	//	imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
	//	real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]),
	// }
	ADDSUBPS_X2_X3
	ADDSUBPS_X4_X5
	ADDSUBPS_X6_X7
	ADDSUBPS_X8_X9

	// X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
	MOVSD (DX), X2
	MOVSD (DX)(R9*1), X4
	LEAQ  (DX)(R9*2), DX // DX = &(DX[incY*2])
	MOVSD (DX), X6
	MOVSD (DX)(R9*1), X8
	ADDPS X2, X3
	ADDPS X4, X5
	ADDPS X6, X7
	ADDPS X8, X9

	MOVSD X3, (DI) // dst[i] = X_i
	MOVSD X5, (DI)(R10*1)
	LEAQ  (DI)(R10*2), DI // DI = &(DI[incDst*2])
	MOVSD X7, (DI)
	MOVSD X9, (DI)(R10*1)
	LEAQ  (SI)(R8*2), SI  // SI = &(SI[incX*2])
	LEAQ  (DX)(R9*2), DX  // DX = &(DX[incY*2])
	LEAQ  (DI)(R10*2), DI // DI = &(DI[incDst*2])
	DECQ  BX
	JNZ   axpyi_loop // } while --BX > 0
	CMPQ  CX, $0     // if CX == 0 { return }
	JE    axpyi_end

axpyi_tail:
	MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) }
	MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) }
	MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) }

	// X_i     = { imag(a) * real(x[i]), real(a) * real(x[i]) }
	// X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
	MULPS X1, X2
	MULPS X0, X3

	// X_i = {
	//	imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
	//	real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]),
	// }
	ADDSUBPS_X2_X3

	// X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
	MOVSD (DX), X4
	ADDPS X4, X3
	MOVSD X3, (DI) // dst[i] = X_i
	ADDQ  R8, SI   // SI += incX
	ADDQ  R9, DX   // DX += incY
	ADDQ  R10, DI  // DI += incDst
	LOOP  axpyi_tail // } while --CX > 0

axpyi_end:
	RET

View File

@@ -0,0 +1,160 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// MOVSHDUP X3, X2 (duplicates the high/imag float32 of each 64-bit pair)
#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3
// MOVSLDUP X3, X3 (duplicates the low/real float32 of each 64-bit pair)
#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB
// ADDSUBPS X2, X3
#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
// MOVSHDUP X5, X4
#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5
// MOVSLDUP X5, X5
#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED
// ADDSUBPS X4, X5
#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
// MOVSHDUP X7, X6
#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7
// MOVSLDUP X7, X7
#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF
// ADDSUBPS X6, X7
#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
// MOVSHDUP X9, X8
#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1
// MOVSLDUP X9, X9
#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9
// ADDSUBPS X8, X9
#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
// AxpyUnitary computes y[i] += alpha * x[i] over min(len(x), len(y))
// complex64 elements, eight elements per unrolled iteration after trimming
// the first element if y is not 16-byte aligned.
// func AxpyUnitary(alpha complex64, x, y []complex64)
TEXT ·AxpyUnitary(SB), NOSPLIT, $0
	MOVQ x_base+8(FP), SI  // SI = &x
	MOVQ y_base+32(FP), DI // DI = &y
	MOVQ x_len+16(FP), CX  // CX = min( len(x), len(y) )
	CMPQ y_len+40(FP), CX
	CMOVQLE y_len+40(FP), CX
	CMPQ CX, $0 // if CX == 0 { return }
	JE   caxy_end
	PXOR X0, X0 // Clear work registers and cache-align loop
	PXOR X1, X1
	MOVSD alpha+0(FP), X0 // X0 = { 0, 0, imag(a), real(a) }
	SHUFPD $0, X0, X0     // X0 = { imag(a), real(a), imag(a), real(a) }
	MOVAPS X0, X1
	SHUFPS $0x11, X1, X1 // X1 = { real(a), imag(a), real(a), imag(a) }
	XORQ AX, AX // i = 0
	MOVQ DI, BX // Align on 16-byte boundary for ADDPS
	ANDQ $15, BX // BX = &y & 15
	JZ   caxy_no_trim // if BX == 0 { goto caxy_no_trim }

	// Trim first value in unaligned buffer
	XORPS X2, X2 // Clear work registers and cache-align loop
	XORPS X3, X3
	XORPS X4, X4
	MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) }
	MOVSHDUP_X3_X2       // X2 = { imag(x[i]), imag(x[i]) }
	MOVSLDUP_X3_X3       // X3 = { real(x[i]), real(x[i]) }
	MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
	MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) }

	// X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), real(a)*real(x[i]) - imag(a)*imag(x[i]) }
	ADDSUBPS_X2_X3
	MOVSD (DI)(AX*8), X4 // X4 = y[i]
	ADDPS X4, X3         // X3 += y[i]
	MOVSD X3, (DI)(AX*8) // y[i] = X3
	INCQ  AX // i++
	DECQ  CX // --CX
	JZ    caxy_end // if CX == 0 { return }

caxy_no_trim:
	MOVAPS X0, X10 // Copy X0 and X1 for pipelining
	MOVAPS X1, X11
	MOVQ   CX, BX
	ANDQ   $7, CX    // CX = n % 8
	SHRQ   $3, BX    // BX = floor( n / 8 )
	JZ     caxy_tail // if BX == 0 { goto caxy_tail }

caxy_loop: // do {
	// X_i = { imag(x[i]), real(x[i]), imag(x[i+1]), real(x[i+1]) }
	MOVUPS (SI)(AX*8), X3
	MOVUPS 16(SI)(AX*8), X5
	MOVUPS 32(SI)(AX*8), X7
	MOVUPS 48(SI)(AX*8), X9

	// X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i+1]), imag(x[i+1]) }
	MOVSHDUP_X3_X2
	MOVSHDUP_X5_X4
	MOVSHDUP_X7_X6
	MOVSHDUP_X9_X8

	// X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) }
	MOVSLDUP_X3_X3
	MOVSLDUP_X5_X5
	MOVSLDUP_X7_X7
	MOVSLDUP_X9_X9

	// X_i     = { imag(a) * real(x[i]),   real(a) * real(x[i]),
	//             imag(a) * real(x[i+1]), real(a) * real(x[i+1]) }
	// X_(i-1) = { real(a) * imag(x[i]),   imag(a) * imag(x[i]),
	//             real(a) * imag(x[i+1]), imag(a) * imag(x[i+1]) }
	MULPS X1, X2
	MULPS X0, X3
	MULPS X11, X4
	MULPS X10, X5
	MULPS X1, X6
	MULPS X0, X7
	MULPS X11, X8
	MULPS X10, X9

	// X_i = {
	//	imag(result[i]):   imag(a)*real(x[i]) + real(a)*imag(x[i]),
	//	real(result[i]):   real(a)*real(x[i]) - imag(a)*imag(x[i]),
	//	imag(result[i+1]): imag(a)*real(x[i+1]) + real(a)*imag(x[i+1]),
	//	real(result[i+1]): real(a)*real(x[i+1]) - imag(a)*imag(x[i+1]),
	// }
	ADDSUBPS_X2_X3
	ADDSUBPS_X4_X5
	ADDSUBPS_X6_X7
	ADDSUBPS_X8_X9

	// X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]),
	//         imag(result[i+1]) + imag(y[i+1]), real(result[i+1]) + real(y[i+1]) }
	ADDPS (DI)(AX*8), X3
	ADDPS 16(DI)(AX*8), X5
	ADDPS 32(DI)(AX*8), X7
	ADDPS 48(DI)(AX*8), X9

	MOVUPS X3, (DI)(AX*8) // y[i:i+1] = X_i
	MOVUPS X5, 16(DI)(AX*8)
	MOVUPS X7, 32(DI)(AX*8)
	MOVUPS X9, 48(DI)(AX*8)
	ADDQ   $8, AX // i += 8
	DECQ   BX     // --BX
	JNZ    caxy_loop // } while BX > 0
	CMPQ   CX, $0    // if CX == 0 { return }
	JE     caxy_end

caxy_tail: // do {
	MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) }
	MOVSHDUP_X3_X2       // X2 = { imag(x[i]), imag(x[i]) }
	MOVSLDUP_X3_X3       // X3 = { real(x[i]), real(x[i]) }
	MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
	MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) }

	// X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]),
	//        real(a)*real(x[i]) - imag(a)*imag(x[i]) }
	ADDSUBPS_X2_X3
	MOVSD (DI)(AX*8), X4 // X4 = y[i]
	ADDPS X4, X3         // X3 += y[i]
	MOVSD X3, (DI)(AX*8) // y[i] = X3
	INCQ  AX // ++i
	LOOP  caxy_tail // } while --CX > 0

caxy_end:
	RET

View File

@@ -0,0 +1,157 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// MOVSHDUP X3, X2
#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3
// MOVSLDUP X3, X3
#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB
// ADDSUBPS X2, X3
#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
// MOVSHDUP X5, X4
#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5
// MOVSLDUP X5, X5
#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED
// ADDSUBPS X4, X5
#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
// MOVSHDUP X7, X6
#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7
// MOVSLDUP X7, X7
#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF
// ADDSUBPS X6, X7
#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
// MOVSHDUP X9, X8
#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1
// MOVSLDUP X9, X9
#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9
// ADDSUBPS X8, X9
#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
// func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64)
//
// AxpyUnitaryTo computes dst[i] = alpha*x[i] + y[i] for
// i in [0, min(len(dst), len(x), len(y))).
// Complex multiplies use the SSE3 MOVSHDUP/MOVSLDUP/ADDSUBPS idiom,
// hand-encoded with the BYTE macros above. The main loop does 8 elements
// per iteration with duplicated alpha registers (X10/X11) for pipelining.
// Clobbers: AX, BX, CX, DX, SI, DI, X0-X11, flags.
TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DI // DI = &dst
MOVQ x_base+32(FP), SI // SI = &x
MOVQ y_base+56(FP), DX // DX = &y
MOVQ x_len+40(FP), CX
CMPQ y_len+64(FP), CX // CX = min( len(x), len(y), len(dst) )
CMOVQLE y_len+64(FP), CX
CMPQ dst_len+8(FP), CX
CMOVQLE dst_len+8(FP), CX
CMPQ CX, $0 // if CX == 0 { return }
JE caxy_end
MOVSD alpha+24(FP), X0 // X0 = { 0, 0, imag(a), real(a) }
SHUFPD $0, X0, X0 // X0 = { imag(a), real(a), imag(a), real(a) }
MOVAPS X0, X1
SHUFPS $0x11, X1, X1 // X1 = { real(a), imag(a), real(a), imag(a) }
XORQ AX, AX // i = 0
MOVQ DX, BX // Align on 16-byte boundary for ADDPS
ANDQ $15, BX // BX = &y & 15
JZ caxy_no_trim // if BX == 0 { goto caxy_no_trim }
// Process one leading element so the vector loop reads y 16-byte aligned.
MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) }
MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) }
// X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), real(a)*real(x[i]) - imag(a)*imag(x[i]) }
ADDSUBPS_X2_X3
MOVSD (DX)(AX*8), X4 // X3 += y[i]
ADDPS X4, X3
MOVSD X3, (DI)(AX*8) // dst[i] = X3
INCQ AX // i++
DECQ CX // --CX
// FIX: was `JZ caxy_tail`, which for a single unaligned element jumped
// into the tail loop with CX == 0, processing one out-of-range element
// and then underflowing CX in LOOP. Return instead when nothing remains.
JZ caxy_end // if CX == 0 { return }
caxy_no_trim:
MOVAPS X0, X10 // Copy X0 and X1 for pipelineing
MOVAPS X1, X11
MOVQ CX, BX
ANDQ $7, CX // CX = n % 8
SHRQ $3, BX // BX = floor( n / 8 )
JZ caxy_tail // if BX == 0 { goto caxy_tail }
caxy_loop:
// X_i = { imag(x[i]), real(x[i]), imag(x[i+1]), real(x[i+1]) }
MOVUPS (SI)(AX*8), X3
MOVUPS 16(SI)(AX*8), X5
MOVUPS 32(SI)(AX*8), X7
MOVUPS 48(SI)(AX*8), X9
// X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i+1]), imag(x[i+1]) }
MOVSHDUP_X3_X2
MOVSHDUP_X5_X4
MOVSHDUP_X7_X6
MOVSHDUP_X9_X8
// X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) }
MOVSLDUP_X3_X3
MOVSLDUP_X5_X5
MOVSLDUP_X7_X7
MOVSLDUP_X9_X9
// X_i = { imag(a) * real(x[i]), real(a) * real(x[i]),
// imag(a) * real(x[i+1]), real(a) * real(x[i+1]) }
// X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]),
// real(a) * imag(x[i+1]), imag(a) * imag(x[i+1]) }
MULPS X1, X2
MULPS X0, X3
MULPS X11, X4
MULPS X10, X5
MULPS X1, X6
MULPS X0, X7
MULPS X11, X8
MULPS X10, X9
// X_i = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]),
// imag(result[i+1]): imag(a)*real(x[i+1]) + real(a)*imag(x[i+1]),
// real(result[i+1]): real(a)*real(x[i+1]) - imag(a)*imag(x[i+1]),
// }
ADDSUBPS_X2_X3
ADDSUBPS_X4_X5
ADDSUBPS_X6_X7
ADDSUBPS_X8_X9
// X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]),
// imag(result[i+1]) + imag(y[i+1]), real(result[i+1]) + real(y[i+1]) }
ADDPS (DX)(AX*8), X3
ADDPS 16(DX)(AX*8), X5
ADDPS 32(DX)(AX*8), X7
ADDPS 48(DX)(AX*8), X9
MOVUPS X3, (DI)(AX*8) // dst[i:i+1] = X_i
MOVUPS X5, 16(DI)(AX*8)
MOVUPS X7, 32(DI)(AX*8)
MOVUPS X9, 48(DI)(AX*8)
ADDQ $8, AX // i += 8
DECQ BX // --BX
JNZ caxy_loop // } while BX > 0
CMPQ CX, $0 // if CX == 0 { return }
JE caxy_end
caxy_tail: // do {
MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) }
MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) }
// X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(a)*real(x[i]) - imag(a)*imag(x[i]) }
ADDSUBPS_X2_X3
MOVSD (DX)(AX*8), X4 // X3 += y[i]
ADDPS X4, X3
MOVSD X3, (DI)(AX*8) // dst[i] = X3
INCQ AX // ++i
LOOP caxy_tail // } while --CX > 0
caxy_end:
RET

7
vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package c64
// conj returns the complex conjugate of c (same real part, negated
// imaginary part).
func conj(c complex64) complex64 {
	return complex(real(c), -imag(c))
}

6
vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go generated vendored Normal file
View File

@@ -0,0 +1,6 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package c64 provides complex64 vector primitives.
package c64 // import "gonum.org/v1/gonum/internal/asm/c64"

View File

@@ -0,0 +1,160 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2
#define MOVSHDUP_X5_X4 LONG $0xE5160FF3 // MOVSHDUP X5, X4
#define MOVSHDUP_X7_X6 LONG $0xF7160FF3 // MOVSHDUP X7, X6
#define MOVSHDUP_X9_X8 LONG $0x160F45F3; BYTE $0xC1 // MOVSHDUP X9, X8
#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3
#define MOVSLDUP_X5_X5 LONG $0xED120FF3 // MOVSLDUP X5, X5
#define MOVSLDUP_X7_X7 LONG $0xFF120FF3 // MOVSLDUP X7, X7
#define MOVSLDUP_X9_X9 LONG $0x120F45F3; BYTE $0xC9 // MOVSLDUP X9, X9
#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3
#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5
#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7
#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define SUM X0
#define P_SUM X1
#define INC_X R8
#define INCx3_X R9
#define INC_Y R10
#define INCx3_Y R11
#define NEG1 X15
#define P_NEG1 X14
// func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64)
//
// DotcInc returns the dot product of strided y with the conjugate of
// strided x: sum of y[iy+i*incY] * conj(x[ix+i*incX]) for i in [0, n).
// The conjugate is formed by multiplying imag(x) by NEG1 (-1) before the
// complex multiply; the main loop handles 4 elements per iteration using
// the two accumulators SUM and P_SUM to shorten the dependency chain.
// Clobbers: SI, DI, BX, CX, R8-R11, X0-X15, flags.
TEXT ·DotcInc(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
PXOR SUM, SUM // SUM = 0
PXOR P_SUM, P_SUM // P_SUM = 0
MOVQ n+48(FP), LEN // LEN = n
CMPQ LEN, $0 // if LEN == 0 { return }
JE dotc_end
MOVQ ix+72(FP), INC_X
MOVQ iy+80(FP), INC_Y
LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(X_PTR[ix])
LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(Y_PTR[iy])
MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(complex64)
SHLQ $3, INC_X
MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(complex64)
SHLQ $3, INC_Y
MOVSS $(-1.0), NEG1
SHUFPS $0, NEG1, NEG1 // { -1, -1, -1, -1 }
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4
SHRQ $2, LEN // LEN = floor( LEN / 4 )
JZ dotc_tail // if LEN == 0 { goto dotc_tail }
MOVUPS NEG1, P_NEG1 // Copy NEG1 for pipelining
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3
dotc_loop: // do {
MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) }
MOVSD (X_PTR)(INC_X*1), X5
MOVSD (X_PTR)(INC_X*2), X7
MOVSD (X_PTR)(INCx3_X*1), X9
// X_(i-1) = { imag(x[i]), imag(x[i]) }
MOVSHDUP_X3_X2
MOVSHDUP_X5_X4
MOVSHDUP_X7_X6
MOVSHDUP_X9_X8
// X_i = { real(x[i]), real(x[i]) }
MOVSLDUP_X3_X3
MOVSLDUP_X5_X5
MOVSLDUP_X7_X7
MOVSLDUP_X9_X9
// X_(i-1) = { -imag(x[i]), -imag(x[i]) }  (applies conj(x[i]))
MULPS NEG1, X2
MULPS P_NEG1, X4
MULPS NEG1, X6
MULPS P_NEG1, X8
// X_j = { imag(y[i]), real(y[i]) }
MOVSD (Y_PTR), X10
MOVSD (Y_PTR)(INC_Y*1), X11
MOVSD (Y_PTR)(INC_Y*2), X12
MOVSD (Y_PTR)(INCx3_Y*1), X13
// X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
MULPS X10, X3
MULPS X11, X5
MULPS X12, X7
MULPS X13, X9
// X_j = { real(y[i]), imag(y[i]) }
SHUFPS $0xB1, X10, X10
SHUFPS $0xB1, X11, X11
SHUFPS $0xB1, X12, X12
SHUFPS $0xB1, X13, X13
// X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
MULPS X10, X2
MULPS X11, X4
MULPS X12, X6
MULPS X13, X8
// X_i = {
// imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]),
// real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]) }
ADDSUBPS_X2_X3
ADDSUBPS_X4_X5
ADDSUBPS_X6_X7
ADDSUBPS_X8_X9
// SUM += X_i
ADDPS X3, SUM
ADDPS X5, P_SUM
ADDPS X7, SUM
ADDPS X9, P_SUM
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X*4])
LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y*4])
DECQ LEN
JNZ dotc_loop // } while --LEN > 0
ADDPS P_SUM, SUM // SUM = { P_SUM + SUM }
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dotc_end
dotc_tail: // do {
MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) }
MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), -imag(x[i]) }
// NOTE(review): MOVUPS reads 16 bytes though only the low 8 (one
// complex64) are used; on the final element this reads past y[i] —
// confirm the over-read cannot cross into an unmapped page.
MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]), real(y[i]) }
MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) }
MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
// X_i = {
// imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]),
// real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) }
ADDSUBPS_X2_X3
ADDPS X3, SUM // SUM += X_i
ADDQ INC_X, X_PTR // X_PTR += INC_X
ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y
DECQ TAIL
JNZ dotc_tail // } while --TAIL > 0
dotc_end:
MOVSD SUM, sum+88(FP) // return SUM
RET

View File

@@ -0,0 +1,208 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVSLDUP_XPTR_IDX_8__X3 LONG $0x1C120FF3; BYTE $0xC6 // MOVSLDUP (SI)(AX*8), X3
#define MOVSLDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF3; WORD $0x10C6 // MOVSLDUP 16(SI)(AX*8), X5
#define MOVSLDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF3; WORD $0x20C6 // MOVSLDUP 32(SI)(AX*8), X7
#define MOVSLDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F3; WORD $0xC64C; BYTE $0x30 // MOVSLDUP 48(SI)(AX*8), X9
#define MOVSHDUP_XPTR_IDX_8__X2 LONG $0x14160FF3; BYTE $0xC6 // MOVSHDUP (SI)(AX*8), X2
#define MOVSHDUP_16_XPTR_IDX_8__X4 LONG $0x64160FF3; WORD $0x10C6 // MOVSHDUP 16(SI)(AX*8), X4
#define MOVSHDUP_32_XPTR_IDX_8__X6 LONG $0x74160FF3; WORD $0x20C6 // MOVSHDUP 32(SI)(AX*8), X6
#define MOVSHDUP_48_XPTR_IDX_8__X8 LONG $0x160F44F3; WORD $0xC644; BYTE $0x30 // MOVSHDUP 48(SI)(AX*8), X8
#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2
#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3
#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3
#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5
#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7
#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define SUM X0
#define P_SUM X1
#define IDX AX
#define I_IDX DX
#define NEG1 X15
#define P_NEG1 X14
// func DotcUnitary(x, y []complex64) (sum complex64)
//
// DotcUnitary returns sum of y[i] * conj(x[i]) for
// i in [0, min(len(x), len(y))). One element may be peeled off so that x
// is 16-byte aligned for the 8-per-iteration main loop; the conjugate is
// applied by multiplying imag(x) by NEG1 (-1). SUM and P_SUM are merged
// lane-wise at dotc_end.
// Clobbers: AX, BX, CX, DX, SI, DI, X0-X15, flags.
TEXT ·DotcUnitary(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
PXOR SUM, SUM // SUM = 0
PXOR P_SUM, P_SUM // P_SUM = 0
MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) )
CMPQ y_len+32(FP), LEN
CMOVQLE y_len+32(FP), LEN
CMPQ LEN, $0 // if LEN == 0 { return }
JE dotc_end
XORQ IDX, IDX // i = 0
MOVSS $(-1.0), NEG1
SHUFPS $0, NEG1, NEG1 // { -1, -1, -1, -1 }
MOVQ X_PTR, DX
ANDQ $15, DX // DX = &x & 15
JZ dotc_aligned // if DX == 0 { goto dotc_aligned }
// Peel one element so x is 16-byte aligned for the vector loop.
MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) }
MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) }
MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), -imag(x[i]) }
MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) }
MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
// X_i = {
// imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]),
// real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) }
ADDSUBPS_X2_X3
MOVAPS X3, SUM // SUM = X_i
INCQ IDX // IDX++
DECQ LEN // LEN--
JZ dotc_ret // if LEN == 0 { goto dotc_ret }
dotc_aligned:
MOVQ LEN, TAIL
ANDQ $7, TAIL // TAIL = LEN % 8
SHRQ $3, LEN // LEN = floor( LEN / 8 )
JZ dotc_tail // if LEN == 0 { goto dotc_tail }
MOVUPS NEG1, P_NEG1 // Copy NEG1 for pipelining
dotc_loop: // do {
MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) }
MOVSLDUP_16_XPTR_IDX_8__X5
MOVSLDUP_32_XPTR_IDX_8__X7
MOVSLDUP_48_XPTR_IDX_8__X9
MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i+1]), imag(x[i+1]) }
MOVSHDUP_16_XPTR_IDX_8__X4
MOVSHDUP_32_XPTR_IDX_8__X6
MOVSHDUP_48_XPTR_IDX_8__X8
// X_j = { imag(y[i]), real(y[i]), imag(y[i+1]), real(y[i+1]) }
MOVUPS (Y_PTR)(IDX*8), X10
MOVUPS 16(Y_PTR)(IDX*8), X11
MOVUPS 32(Y_PTR)(IDX*8), X12
MOVUPS 48(Y_PTR)(IDX*8), X13
// X_(i-1) = { -imag(x[i]), -imag(x[i]), -imag(x[i+1]), -imag(x[i+1]) }
MULPS NEG1, X2
MULPS P_NEG1, X4
MULPS NEG1, X6
MULPS P_NEG1, X8
// X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]),
// imag(y[i+1]) * real(x[i+1]), real(y[i+1]) * real(x[i+1]) }
MULPS X10, X3
MULPS X11, X5
MULPS X12, X7
MULPS X13, X9
// X_j = { real(y[i]), imag(y[i]), real(y[i+1]), imag(y[i+1]) }
SHUFPS $0xB1, X10, X10
SHUFPS $0xB1, X11, X11
SHUFPS $0xB1, X12, X12
SHUFPS $0xB1, X13, X13
// X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]),
// real(y[i+1]) * imag(x[i+1]), imag(y[i+1]) * imag(x[i+1]) }
MULPS X10, X2
MULPS X11, X4
MULPS X12, X6
MULPS X13, X8
// X_i = {
// imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]),
// real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]),
// imag(result[i+1]): imag(y[i+1]) * real(x[i+1]) + real(y[i+1]) * imag(x[i+1]),
// real(result[i+1]): real(y[i+1]) * real(x[i+1]) - imag(y[i+1]) * imag(x[i+1]),
// }
ADDSUBPS_X2_X3
ADDSUBPS_X4_X5
ADDSUBPS_X6_X7
ADDSUBPS_X8_X9
// SUM += X_i
ADDPS X3, SUM
ADDPS X5, P_SUM
ADDPS X7, SUM
ADDPS X9, P_SUM
ADDQ $8, IDX // IDX += 8
DECQ LEN
JNZ dotc_loop // } while --LEN > 0
ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] }
XORPS SUM, SUM // SUM = 0
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dotc_end
dotc_tail:
MOVQ TAIL, LEN
SHRQ $1, LEN // LEN = floor( LEN / 2 )
JZ dotc_tail_one // if LEN == 0 { goto dotc_tail_one }
dotc_tail_two: // do {
MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) }
MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i+1]), imag(x[i+1]) }
MOVUPS (Y_PTR)(IDX*8), X10 // X_j = two packed y elements
MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), -imag(x[i]), -imag(x[i+1]), -imag(x[i+1]) }
MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
SHUFPS $0xB1, X10, X10 // X_j = { real(y[i]), imag(y[i]) }
MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
// X_i = {
// imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]),
// real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) }
ADDSUBPS_X2_X3
ADDPS X3, SUM // SUM += X_i
ADDQ $2, IDX // IDX += 2
DECQ LEN
JNZ dotc_tail_two // } while --LEN > 0
ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] }
XORPS SUM, SUM // SUM = 0
ANDQ $1, TAIL
JZ dotc_end
dotc_tail_one:
MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) }
MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) }
MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), -imag(x[i]) }
MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) }
MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
// X_i = {
// imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]),
// real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) }
ADDSUBPS_X2_X3
ADDPS X3, SUM // SUM += X_i
dotc_end:
// Fold the two complex lanes of the accumulators into the low lane.
ADDPS P_SUM, SUM // SUM = { P_SUM[0] + SUM[0] }
MOVHLPS P_SUM, P_SUM // P_SUM = { P_SUM[1], P_SUM[1] }
ADDPS P_SUM, SUM // SUM = { P_SUM[1] + SUM[0] }
dotc_ret:
MOVSD SUM, sum+48(FP) // return SUM
RET

View File

@@ -0,0 +1,148 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2
#define MOVSHDUP_X5_X4 LONG $0xE5160FF3 // MOVSHDUP X5, X4
#define MOVSHDUP_X7_X6 LONG $0xF7160FF3 // MOVSHDUP X7, X6
#define MOVSHDUP_X9_X8 LONG $0x160F45F3; BYTE $0xC1 // MOVSHDUP X9, X8
#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3
#define MOVSLDUP_X5_X5 LONG $0xED120FF3 // MOVSLDUP X5, X5
#define MOVSLDUP_X7_X7 LONG $0xFF120FF3 // MOVSLDUP X7, X7
#define MOVSLDUP_X9_X9 LONG $0x120F45F3; BYTE $0xC9 // MOVSLDUP X9, X9
#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3
#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5
#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7
#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define SUM X0
#define P_SUM X1
#define INC_X R8
#define INCx3_X R9
#define INC_Y R10
#define INCx3_Y R11
// func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64)
//
// DotuInc returns the unconjugated dot product of strided x and y:
// sum of y[iy+i*incY] * x[ix+i*incX] for i in [0, n).
// Identical to DotcInc except that imag(x) is not negated.
// The main loop processes 4 elements per iteration using the two
// accumulators SUM and P_SUM to shorten the dependency chain.
// Clobbers: SI, DI, BX, CX, R8-R11, X0-X13, flags.
TEXT ·DotuInc(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
PXOR SUM, SUM // SUM = 0
PXOR P_SUM, P_SUM // P_SUM = 0
MOVQ n+48(FP), LEN // LEN = n
CMPQ LEN, $0 // if LEN == 0 { return }
JE dotu_end
MOVQ ix+72(FP), INC_X
MOVQ iy+80(FP), INC_Y
LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(X_PTR[ix])
LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(Y_PTR[iy])
MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(complex64)
SHLQ $3, INC_X
MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(complex64)
SHLQ $3, INC_Y
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4
SHRQ $2, LEN // LEN = floor( LEN / 4 )
JZ dotu_tail // if LEN == 0 { goto dotu_tail }
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3
dotu_loop: // do {
MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) }
MOVSD (X_PTR)(INC_X*1), X5
MOVSD (X_PTR)(INC_X*2), X7
MOVSD (X_PTR)(INCx3_X*1), X9
// X_(i-1) = { imag(x[i]), imag(x[i]) }
MOVSHDUP_X3_X2
MOVSHDUP_X5_X4
MOVSHDUP_X7_X6
MOVSHDUP_X9_X8
// X_i = { real(x[i]), real(x[i]) }
MOVSLDUP_X3_X3
MOVSLDUP_X5_X5
MOVSLDUP_X7_X7
MOVSLDUP_X9_X9
// X_j = { imag(y[i]), real(y[i]) }
MOVSD (Y_PTR), X10
MOVSD (Y_PTR)(INC_Y*1), X11
MOVSD (Y_PTR)(INC_Y*2), X12
MOVSD (Y_PTR)(INCx3_Y*1), X13
// X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
MULPS X10, X3
MULPS X11, X5
MULPS X12, X7
MULPS X13, X9
// X_j = { real(y[i]), imag(y[i]) }
SHUFPS $0xB1, X10, X10
SHUFPS $0xB1, X11, X11
SHUFPS $0xB1, X12, X12
SHUFPS $0xB1, X13, X13
// X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
MULPS X10, X2
MULPS X11, X4
MULPS X12, X6
MULPS X13, X8
// X_i = {
// imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]),
// real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]) }
ADDSUBPS_X2_X3
ADDSUBPS_X4_X5
ADDSUBPS_X6_X7
ADDSUBPS_X8_X9
// SUM += X_i
ADDPS X3, SUM
ADDPS X5, P_SUM
ADDPS X7, SUM
ADDPS X9, P_SUM
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X*4])
LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y*4])
DECQ LEN
JNZ dotu_loop // } while --LEN > 0
ADDPS P_SUM, SUM // SUM = { P_SUM + SUM }
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dotu_end
dotu_tail: // do {
MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) }
// NOTE(review): MOVUPS reads 16 bytes though only the low 8 (one
// complex64) are used; on the final element this reads past y[i] —
// confirm the over-read cannot cross into an unmapped page.
MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]), real(y[i]) }
MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) }
MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
// X_i = {
// imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]),
// real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) }
ADDSUBPS_X2_X3
ADDPS X3, SUM // SUM += X_i
ADDQ INC_X, X_PTR // X_PTR += INC_X
ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y
DECQ TAIL
JNZ dotu_tail // } while --TAIL > 0
dotu_end:
MOVSD SUM, sum+88(FP) // return SUM
RET

View File

@@ -0,0 +1,197 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVSLDUP_XPTR_IDX_8__X3 LONG $0x1C120FF3; BYTE $0xC6 // MOVSLDUP (SI)(AX*8), X3
#define MOVSLDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF3; WORD $0x10C6 // MOVSLDUP 16(SI)(AX*8), X5
#define MOVSLDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF3; WORD $0x20C6 // MOVSLDUP 32(SI)(AX*8), X7
#define MOVSLDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F3; WORD $0xC64C; BYTE $0x30 // MOVSLDUP 48(SI)(AX*8), X9
#define MOVSHDUP_XPTR_IDX_8__X2 LONG $0x14160FF3; BYTE $0xC6 // MOVSHDUP (SI)(AX*8), X2
#define MOVSHDUP_16_XPTR_IDX_8__X4 LONG $0x64160FF3; WORD $0x10C6 // MOVSHDUP 16(SI)(AX*8), X4
#define MOVSHDUP_32_XPTR_IDX_8__X6 LONG $0x74160FF3; WORD $0x20C6 // MOVSHDUP 32(SI)(AX*8), X6
#define MOVSHDUP_48_XPTR_IDX_8__X8 LONG $0x160F44F3; WORD $0xC644; BYTE $0x30 // MOVSHDUP 48(SI)(AX*8), X8
#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2
#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3
#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3
#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5
#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7
#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define SUM X0
#define P_SUM X1
#define IDX AX
#define I_IDX DX
#define NEG1 X15
#define P_NEG1 X14
// func DotuUnitary(x, y []complex64) (sum complex64)
//
// DotuUnitary returns the unconjugated dot product
// sum of y[i] * x[i] for i in [0, min(len(x), len(y))).
// Identical to DotcUnitary except that imag(x) is not negated. One element
// may be peeled off so x is 16-byte aligned for the 8-per-iteration main
// loop; SUM and P_SUM are merged lane-wise at dotu_end.
// Clobbers: AX, BX, CX, DX, SI, DI, X0-X13, flags.
TEXT ·DotuUnitary(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
PXOR SUM, SUM // SUM = 0
PXOR P_SUM, P_SUM // P_SUM = 0
MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) )
CMPQ y_len+32(FP), LEN
CMOVQLE y_len+32(FP), LEN
CMPQ LEN, $0 // if LEN == 0 { return }
JE dotu_end
XORQ IDX, IDX // IDX = 0
MOVQ X_PTR, DX
ANDQ $15, DX // DX = &x & 15
JZ dotu_aligned // if DX == 0 { goto dotu_aligned }
// Peel one element so x is 16-byte aligned for the vector loop.
MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) }
MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) }
MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) }
MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
// X_i = {
// imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]),
// real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) }
ADDSUBPS_X2_X3
MOVAPS X3, SUM // SUM = X_i
INCQ IDX // IDX++
DECQ LEN // LEN--
JZ dotu_end // if LEN == 0 { goto dotu_end }
dotu_aligned:
MOVQ LEN, TAIL
ANDQ $7, TAIL // TAIL = LEN % 8
SHRQ $3, LEN // LEN = floor( LEN / 8 )
JZ dotu_tail // if LEN == 0 { goto dotu_tail }
PXOR P_SUM, P_SUM
dotu_loop: // do {
MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) }
MOVSLDUP_16_XPTR_IDX_8__X5
MOVSLDUP_32_XPTR_IDX_8__X7
MOVSLDUP_48_XPTR_IDX_8__X9
MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i+1]), imag(x[i+1]) }
MOVSHDUP_16_XPTR_IDX_8__X4
MOVSHDUP_32_XPTR_IDX_8__X6
MOVSHDUP_48_XPTR_IDX_8__X8
// X_j = { imag(y[i]), real(y[i]), imag(y[i+1]), real(y[i+1]) }
MOVUPS (Y_PTR)(IDX*8), X10
MOVUPS 16(Y_PTR)(IDX*8), X11
MOVUPS 32(Y_PTR)(IDX*8), X12
MOVUPS 48(Y_PTR)(IDX*8), X13
// X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]),
// imag(y[i+1]) * real(x[i+1]), real(y[i+1]) * real(x[i+1]) }
MULPS X10, X3
MULPS X11, X5
MULPS X12, X7
MULPS X13, X9
// X_j = { real(y[i]), imag(y[i]), real(y[i+1]), imag(y[i+1]) }
SHUFPS $0xB1, X10, X10
SHUFPS $0xB1, X11, X11
SHUFPS $0xB1, X12, X12
SHUFPS $0xB1, X13, X13
// X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]),
// real(y[i+1]) * imag(x[i+1]), imag(y[i+1]) * imag(x[i+1]) }
MULPS X10, X2
MULPS X11, X4
MULPS X12, X6
MULPS X13, X8
// X_i = {
// imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]),
// real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]),
// imag(result[i+1]): imag(y[i+1]) * real(x[i+1]) + real(y[i+1]) * imag(x[i+1]),
// real(result[i+1]): real(y[i+1]) * real(x[i+1]) - imag(y[i+1]) * imag(x[i+1]),
// }
ADDSUBPS_X2_X3
ADDSUBPS_X4_X5
ADDSUBPS_X6_X7
ADDSUBPS_X8_X9
// SUM += X_i
ADDPS X3, SUM
ADDPS X5, P_SUM
ADDPS X7, SUM
ADDPS X9, P_SUM
ADDQ $8, IDX // IDX += 8
DECQ LEN
JNZ dotu_loop // } while --LEN > 0
ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] }
XORPS SUM, SUM // SUM = 0
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dotu_end
dotu_tail:
MOVQ TAIL, LEN
SHRQ $1, LEN // LEN = floor( LEN / 2 )
JZ dotu_tail_one // if LEN == 0 { goto dotu_tail_one }
dotu_tail_two: // do {
MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) }
MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i+1]), imag(x[i+1]) }
MOVUPS (Y_PTR)(IDX*8), X10 // X_j = two packed y elements
MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
SHUFPS $0xB1, X10, X10 // X_j = { real(y[i]), imag(y[i]) }
MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
// X_i = {
// imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]),
// real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) }
ADDSUBPS_X2_X3
ADDPS X3, SUM // SUM += X_i
ADDQ $2, IDX // IDX += 2
DECQ LEN
JNZ dotu_tail_two // } while --LEN > 0
ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] }
XORPS SUM, SUM // SUM = 0
ANDQ $1, TAIL
JZ dotu_end
dotu_tail_one:
MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) }
MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) }
MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) }
SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) }
MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) }
// X_i = {
// imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]),
// real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) }
ADDSUBPS_X2_X3
ADDPS X3, SUM // SUM += X_i
dotu_end:
// Fold the two complex lanes of the accumulators into the low lane.
ADDPS P_SUM, SUM // SUM = { P_SUM[0] + SUM[0] }
MOVHLPS P_SUM, P_SUM // P_SUM = { P_SUM[1], P_SUM[1] }
ADDPS P_SUM, SUM // SUM = { P_SUM[1] + SUM[0] }
dotu_ret:
MOVSD SUM, sum+48(FP) // return SUM
RET

79
vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go generated vendored Normal file
View File

@@ -0,0 +1,79 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package c64
// ScalUnitary is
// for i := range x {
// x[i] *= alpha
// }
func ScalUnitary(alpha complex64, x []complex64) {
	// Scale every element of x in place by alpha.
	for i := 0; i < len(x); i++ {
		x[i] *= alpha
	}
}
// ScalUnitaryTo is
// for i, v := range x {
// dst[i] = alpha * v
// }
func ScalUnitaryTo(dst []complex64, alpha complex64, x []complex64) {
	// Write alpha*x[i] into dst[i] for every element of x.
	for i := 0; i < len(x); i++ {
		dst[i] = alpha * x[i]
	}
}
// ScalInc is
// var ix uintptr
// for i := 0; i < int(n); i++ {
// x[ix] *= alpha
// ix += incX
// }
func ScalInc(alpha complex64, x []complex64, n, incX uintptr) {
	// Scale n elements of x in place, stepping by incX between elements.
	ix := uintptr(0)
	for count := int(n); count > 0; count-- {
		x[ix] *= alpha
		ix += incX
	}
}
// ScalIncTo is
// var idst, ix uintptr
// for i := 0; i < int(n); i++ {
// dst[idst] = alpha * x[ix]
// ix += incX
// idst += incDst
// }
func ScalIncTo(dst []complex64, incDst uintptr, alpha complex64, x []complex64, n, incX uintptr) {
	// Write alpha*x into dst, with independent strides for source and
	// destination.
	var idst, ix uintptr
	for count := int(n); count > 0; count-- {
		dst[idst] = alpha * x[ix]
		ix += incX
		idst += incDst
	}
}
// SscalUnitary is
// for i, v := range x {
// x[i] = complex(real(v)*alpha, imag(v)*alpha)
// }
func SscalUnitary(alpha float32, x []complex64) {
	// Scale both components of every element by the real scalar alpha.
	for i := range x {
		x[i] = complex(real(x[i])*alpha, imag(x[i])*alpha)
	}
}
// SscalInc is
// var ix uintptr
// for i := 0; i < int(n); i++ {
// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha)
// ix += inc
// }
func SscalInc(alpha float32, x []complex64, n, inc uintptr) {
	// Scale n strided elements of x by the real scalar alpha.
	ix := uintptr(0)
	for count := int(n); count > 0; count-- {
		v := x[ix]
		x[ix] = complex(real(v)*alpha, imag(v)*alpha)
		ix += inc
	}
}

View File

@@ -0,0 +1,68 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
package c64
// The declarations below have no Go bodies: each is implemented in the
// corresponding *_amd64.s assembly file in this package (see the build
// tags above). The comments give the reference-loop semantics.

// AxpyUnitary is
// for i, v := range x {
// y[i] += alpha * v
// }
func AxpyUnitary(alpha complex64, x, y []complex64)

// AxpyUnitaryTo is
// for i, v := range x {
// dst[i] = alpha*v + y[i]
// }
func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64)

// AxpyInc is
// for i := 0; i < int(n); i++ {
// y[iy] += alpha * x[ix]
// ix += incX
// iy += incY
// }
func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr)

// AxpyIncTo is
// for i := 0; i < int(n); i++ {
// dst[idst] = alpha*x[ix] + y[iy]
// ix += incX
// iy += incY
// idst += incDst
// }
func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr)

// DotcUnitary is
// for i, v := range x {
// sum += y[i] * conj(v)
// }
// return sum
func DotcUnitary(x, y []complex64) (sum complex64)

// DotcInc is
// for i := 0; i < int(n); i++ {
// sum += y[iy] * conj(x[ix])
// ix += incX
// iy += incY
// }
// return sum
func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64)

// DotuUnitary is
// for i, v := range x {
// sum += y[i] * v
// }
// return sum
func DotuUnitary(x, y []complex64) (sum complex64)

// DotuInc is
// for i := 0; i < int(n); i++ {
// sum += y[iy] * x[ix]
// ix += incX
// iy += incY
// }
// return sum
func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64)

View File

@@ -0,0 +1,113 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package c64
// AxpyUnitary is
// for i, v := range x {
// y[i] += alpha * v
// }
func AxpyUnitary(alpha complex64, x, y []complex64) {
	// y[i] += alpha*x[i] for every element of x.
	for i := range x {
		y[i] += alpha * x[i]
	}
}
// AxpyUnitaryTo is
// for i, v := range x {
// dst[i] = alpha*v + y[i]
// }
func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) {
	// dst[i] = alpha*x[i] + y[i] for every element of x.
	for i := 0; i < len(x); i++ {
		dst[i] = alpha*x[i] + y[i]
	}
}
// AxpyInc is
// for i := 0; i < int(n); i++ {
// y[iy] += alpha * x[ix]
// ix += incX
// iy += incY
// }
func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) {
	// Strided axpy: accumulate alpha*x into y, starting at offsets ix/iy.
	for count := int(n); count > 0; count-- {
		y[iy] += alpha * x[ix]
		ix += incX
		iy += incY
	}
}
// AxpyIncTo is
// for i := 0; i < int(n); i++ {
// dst[idst] = alpha*x[ix] + y[iy]
// ix += incX
// iy += incY
// idst += incDst
// }
func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) {
	// Strided axpy into a separate destination slice.
	for count := int(n); count > 0; count-- {
		dst[idst] = alpha*x[ix] + y[iy]
		ix += incX
		iy += incY
		idst += incDst
	}
}
// DotcUnitary is
//	for i, v := range x {
//		sum += y[i] * conj(v)
//	}
//	return sum
func DotcUnitary(x, y []complex64) (sum complex64) {
	for i := range x {
		sum += y[i] * conj(x[i])
	}
	return sum
}
// DotcInc is
//	for i := 0; i < int(n); i++ {
//		sum += y[iy] * conj(x[ix])
//		ix += incX
//		iy += incY
//	}
//	return sum
func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) {
	// Count down from n; the index parameters double as running cursors.
	for i := int(n); i > 0; i-- {
		sum += y[iy] * conj(x[ix])
		ix += incX
		iy += incY
	}
	return sum
}
// DotuUnitary is
//	for i, v := range x {
//		sum += y[i] * v
//	}
//	return sum
func DotuUnitary(x, y []complex64) (sum complex64) {
	for i := range x {
		sum += y[i] * x[i]
	}
	return sum
}
// DotuInc is
//	for i := 0; i < int(n); i++ {
//		sum += y[iy] * x[ix]
//		ix += incX
//		iy += incY
//	}
//	return sum
func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) {
	// Count down from n; the index parameters double as running cursors.
	for i := int(n); i > 0; i-- {
		sum += y[iy] * x[ix]
		ix += incX
		iy += incY
	}
	return sum
}

View File

@@ -0,0 +1,73 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr)
//
// AxpyInc is an amd64 (Go/Plan 9 asm) implementation of
//	for i := 0; i < int(n); i++ {
//		y[iy] += alpha * x[ix]
//		ix += incX
//		iy += incY
//	}
// Register use: SI walks x, DI is the write pointer into y and DX the
// read pointer into y, R8/R9 hold incX/incY scaled to bytes,
// CX is the unrolled-loop counter and BX the tail element count.
TEXT ·AxpyInc(SB), NOSPLIT, $0
	MOVQ n+56(FP), CX      // CX = n
	CMPQ CX, $0            // if n <= 0 { return }
	JLE  axpyi_end
	MOVQ x_base+8(FP), SI  // SI = &x
	MOVQ y_base+32(FP), DI // DI = &y
	MOVQ ix+80(FP), R8     // R8 = ix
	MOVQ iy+88(FP), R9     // R9 = iy
	LEAQ (SI)(R8*4), SI    // SI = &(x[ix])
	LEAQ (DI)(R9*4), DI    // DI = &(y[iy])
	MOVQ DI, DX            // DX = DI // Read pointer for y
	MOVQ incX+64(FP), R8   // R8 = incX
	SHLQ $2, R8            // R8 *= sizeof(float32)
	MOVQ incY+72(FP), R9   // R9 = incY
	SHLQ $2, R9            // R9 *= sizeof(float32)
	MOVSS alpha+0(FP), X0  // X0 = alpha
	MOVSS X0, X1           // X1 = X0 // second copy of alpha for pipelining
	MOVQ CX, BX
	ANDQ $3, BX            // BX = n % 4
	SHRQ $2, CX            // CX = floor( n / 4 )
	JZ   axpyi_tail_start  // if CX == 0 { goto axpyi_tail_start }
axpyi_loop: // Loop unrolled 4x do {
	MOVSS (SI), X2       // X_i = x[i]
	MOVSS (SI)(R8*1), X3
	LEAQ (SI)(R8*2), SI  // SI = &(SI[incX*2])
	MOVSS (SI), X4
	MOVSS (SI)(R8*1), X5
	MULSS X1, X2         // X_i *= alpha
	MULSS X0, X3
	MULSS X1, X4
	MULSS X0, X5
	ADDSS (DX), X2       // X_i += y[i]
	ADDSS (DX)(R9*1), X3
	LEAQ (DX)(R9*2), DX  // DX = &(DX[incY*2])
	ADDSS (DX), X4
	ADDSS (DX)(R9*1), X5
	MOVSS X2, (DI)       // y[i] = X_i
	MOVSS X3, (DI)(R9*1)
	LEAQ (DI)(R9*2), DI  // DI = &(DI[incY*2])
	MOVSS X4, (DI)
	MOVSS X5, (DI)(R9*1)
	LEAQ (SI)(R8*2), SI  // SI = &(SI[incX*2]) // Increment addresses
	LEAQ (DX)(R9*2), DX  // DX = &(DX[incY*2])
	LEAQ (DI)(R9*2), DI  // DI = &(DI[incY*2])
	LOOP axpyi_loop      // } while --CX > 0
	CMPQ BX, $0          // if BX == 0 { return }
	JE axpyi_end
axpyi_tail_start: // Reset loop registers
	MOVQ BX, CX // Loop counter: CX = BX
axpyi_tail: // do { // one element per iteration
	MOVSS (SI), X2  // X2 = x[i]
	MULSS X1, X2    // X2 *= alpha
	ADDSS (DI), X2  // X2 += y[i]
	MOVSS X2, (DI)  // y[i] = X2
	ADDQ R8, SI     // SI = &(SI[incX])
	ADDQ R9, DI     // DI = &(DI[incY])
	LOOP axpyi_tail // } while --CX > 0
axpyi_end:
	RET

View File

@@ -0,0 +1,78 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr)
//
// AxpyIncTo is an amd64 (Go/Plan 9 asm) implementation of
//	for i := 0; i < int(n); i++ {
//		dst[idst] = alpha*x[ix] + y[iy]
//		ix += incX
//		iy += incY
//		idst += incDst
//	}
// Register use: SI walks x, DX walks y, DI walks dst; R8/R9/R10 hold
// incX/incY/incDst scaled to bytes; CX is the unrolled-loop counter
// and BX the tail element count.
TEXT ·AxpyIncTo(SB), NOSPLIT, $0
	MOVQ n+96(FP), CX       // CX = n
	CMPQ CX, $0             // if n <= 0 { return }
	JLE  axpyi_end
	MOVQ dst_base+0(FP), DI // DI = &dst
	MOVQ x_base+48(FP), SI  // SI = &x
	MOVQ y_base+72(FP), DX  // DX = &y
	MOVQ ix+120(FP), R8     // R8 = ix // Load the first index
	MOVQ iy+128(FP), R9     // R9 = iy
	MOVQ idst+32(FP), R10   // R10 = idst
	LEAQ (SI)(R8*4), SI     // SI = &(x[ix])
	LEAQ (DX)(R9*4), DX     // DX = &(y[iy])
	LEAQ (DI)(R10*4), DI    // DI = &(dst[idst])
	MOVQ incX+104(FP), R8   // R8 = incX
	SHLQ $2, R8             // R8 *= sizeof(float32)
	MOVQ incY+112(FP), R9   // R9 = incY
	SHLQ $2, R9             // R9 *= sizeof(float32)
	MOVQ incDst+24(FP), R10 // R10 = incDst
	SHLQ $2, R10            // R10 *= sizeof(float32)
	MOVSS alpha+40(FP), X0  // X0 = alpha
	MOVSS X0, X1            // X1 = X0 // second copy of alpha for pipelining
	MOVQ CX, BX
	ANDQ $3, BX             // BX = n % 4
	SHRQ $2, CX             // CX = floor( n / 4 )
	JZ   axpyi_tail_start   // if CX == 0 { goto axpyi_tail_start }
axpyi_loop: // Loop unrolled 4x do {
	MOVSS (SI), X2        // X_i = x[i]
	MOVSS (SI)(R8*1), X3
	LEAQ (SI)(R8*2), SI   // SI = &(SI[incX*2])
	MOVSS (SI), X4
	MOVSS (SI)(R8*1), X5
	MULSS X1, X2          // X_i *= alpha
	MULSS X0, X3
	MULSS X1, X4
	MULSS X0, X5
	ADDSS (DX), X2        // X_i += y[i]
	ADDSS (DX)(R9*1), X3
	LEAQ (DX)(R9*2), DX   // DX = &(DX[incY*2])
	ADDSS (DX), X4
	ADDSS (DX)(R9*1), X5
	MOVSS X2, (DI)        // dst[i] = X_i
	MOVSS X3, (DI)(R10*1)
	LEAQ (DI)(R10*2), DI  // DI = &(DI[incDst*2])
	MOVSS X4, (DI)
	MOVSS X5, (DI)(R10*1)
	LEAQ (SI)(R8*2), SI   // SI = &(SI[incX*2]) // Increment addresses
	LEAQ (DX)(R9*2), DX   // DX = &(DX[incY*2])
	LEAQ (DI)(R10*2), DI  // DI = &(DI[incDst*2])
	LOOP axpyi_loop       // } while --CX > 0
	CMPQ BX, $0           // if BX == 0 { return }
	JE axpyi_end
axpyi_tail_start: // Reset loop registers
	MOVQ BX, CX // Loop counter: CX = BX
axpyi_tail: // do { // one element per iteration
	MOVSS (SI), X2  // X2 = x[i]
	MULSS X1, X2    // X2 *= alpha
	ADDSS (DX), X2  // X2 += y[i]
	MOVSS X2, (DI)  // dst[i] = X2
	ADDQ R8, SI     // SI = &(SI[incX])
	ADDQ R9, DX     // DX = &(DX[incY])
	ADDQ R10, DI    // DI = &(DI[incDst])
	LOOP axpyi_tail // } while --CX > 0
axpyi_end:
	RET

View File

@@ -0,0 +1,97 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// func AxpyUnitary(alpha float32, x, y []float32)
TEXT ·AxpyUnitary(SB), NOSPLIT, $0
MOVQ x_base+8(FP), SI // SI = &x
MOVQ y_base+32(FP), DI // DI = &y
MOVQ x_len+16(FP), BX // BX = min( len(x), len(y) )
CMPQ y_len+40(FP), BX
CMOVQLE y_len+40(FP), BX
CMPQ BX, $0 // if BX == 0 { return }
JE axpy_end
MOVSS alpha+0(FP), X0
SHUFPS $0, X0, X0 // X0 = { a, a, a, a }
XORQ AX, AX // i = 0
PXOR X2, X2 // 2 NOP instructions (PXOR) to align
PXOR X3, X3 // loop to cache line
MOVQ DI, CX
ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS
JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim }
XORQ $0xF, CX // CX = 4 - floor( BX % 16 / 4 )
INCQ CX
SHRQ $2, CX
axpy_align: // Trim first value(s) in unaligned buffer do {
MOVSS (SI)(AX*4), X2 // X2 = x[i]
MULSS X0, X2 // X2 *= a
ADDSS (DI)(AX*4), X2 // X2 += y[i]
MOVSS X2, (DI)(AX*4) // y[i] = X2
INCQ AX // i++
DECQ BX
JZ axpy_end // if --BX == 0 { return }
LOOP axpy_align // } while --CX > 0
axpy_no_trim:
MOVUPS X0, X1 // Copy X0 to X1 for pipelining
MOVQ BX, CX
ANDQ $0xF, BX // BX = len % 16
SHRQ $4, CX // CX = int( len / 16 )
JZ axpy_tail4_start // if CX == 0 { return }
axpy_loop: // Loop unrolled 16x do {
MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4]
MOVUPS 16(SI)(AX*4), X3
MOVUPS 32(SI)(AX*4), X4
MOVUPS 48(SI)(AX*4), X5
MULPS X0, X2 // X2 *= a
MULPS X1, X3
MULPS X0, X4
MULPS X1, X5
ADDPS (DI)(AX*4), X2 // X2 += y[i:i+4]
ADDPS 16(DI)(AX*4), X3
ADDPS 32(DI)(AX*4), X4
ADDPS 48(DI)(AX*4), X5
MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2
MOVUPS X3, 16(DI)(AX*4)
MOVUPS X4, 32(DI)(AX*4)
MOVUPS X5, 48(DI)(AX*4)
ADDQ $16, AX // i += 16
LOOP axpy_loop // while (--CX) > 0
CMPQ BX, $0 // if BX == 0 { return }
JE axpy_end
axpy_tail4_start: // Reset loop counter for 4-wide tail loop
MOVQ BX, CX // CX = floor( BX / 4 )
SHRQ $2, CX
JZ axpy_tail_start // if CX == 0 { goto axpy_tail_start }
axpy_tail4: // Loop unrolled 4x do {
MOVUPS (SI)(AX*4), X2 // X2 = x[i]
MULPS X0, X2 // X2 *= a
ADDPS (DI)(AX*4), X2 // X2 += y[i]
MOVUPS X2, (DI)(AX*4) // y[i] = X2
ADDQ $4, AX // i += 4
LOOP axpy_tail4 // } while --CX > 0
axpy_tail_start: // Reset loop counter for 1-wide tail loop
MOVQ BX, CX // CX = BX % 4
ANDQ $3, CX
JZ axpy_end // if CX == 0 { return }
axpy_tail:
MOVSS (SI)(AX*4), X1 // X1 = x[i]
MULSS X0, X1 // X1 *= a
ADDSS (DI)(AX*4), X1 // X1 += y[i]
MOVSS X1, (DI)(AX*4) // y[i] = X1
INCQ AX // i++
LOOP axpy_tail // } while --CX > 0
axpy_end:
RET

View File

@@ -0,0 +1,98 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32)
TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DI // DI = &dst
MOVQ x_base+32(FP), SI // SI = &x
MOVQ y_base+56(FP), DX // DX = &y
MOVQ x_len+40(FP), BX // BX = min( len(x), len(y), len(dst) )
CMPQ y_len+64(FP), BX
CMOVQLE y_len+64(FP), BX
CMPQ dst_len+8(FP), BX
CMOVQLE dst_len+8(FP), BX
CMPQ BX, $0 // if BX == 0 { return }
JE axpy_end
MOVSS alpha+24(FP), X0
SHUFPS $0, X0, X0 // X0 = { a, a, a, a, }
XORQ AX, AX // i = 0
MOVQ DX, CX
ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS
JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim }
XORQ $0xF, CX // CX = 4 - floor ( B % 16 / 4 )
INCQ CX
SHRQ $2, CX
axpy_align: // Trim first value(s) in unaligned buffer do {
MOVSS (SI)(AX*4), X2 // X2 = x[i]
MULSS X0, X2 // X2 *= a
ADDSS (DX)(AX*4), X2 // X2 += y[i]
MOVSS X2, (DI)(AX*4) // y[i] = X2
INCQ AX // i++
DECQ BX
JZ axpy_end // if --BX == 0 { return }
LOOP axpy_align // } while --CX > 0
axpy_no_trim:
MOVUPS X0, X1 // Copy X0 to X1 for pipelining
MOVQ BX, CX
ANDQ $0xF, BX // BX = len % 16
SHRQ $4, CX // CX = floor( len / 16 )
JZ axpy_tail4_start // if CX == 0 { return }
axpy_loop: // Loop unrolled 16x do {
MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4]
MOVUPS 16(SI)(AX*4), X3
MOVUPS 32(SI)(AX*4), X4
MOVUPS 48(SI)(AX*4), X5
MULPS X0, X2 // X2 *= a
MULPS X1, X3
MULPS X0, X4
MULPS X1, X5
ADDPS (DX)(AX*4), X2 // X2 += y[i:i+4]
ADDPS 16(DX)(AX*4), X3
ADDPS 32(DX)(AX*4), X4
ADDPS 48(DX)(AX*4), X5
MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2
MOVUPS X3, 16(DI)(AX*4)
MOVUPS X4, 32(DI)(AX*4)
MOVUPS X5, 48(DI)(AX*4)
ADDQ $16, AX // i += 16
LOOP axpy_loop // while (--CX) > 0
CMPQ BX, $0 // if BX == 0 { return }
JE axpy_end
axpy_tail4_start: // Reset loop counter for 4-wide tail loop
MOVQ BX, CX // CX = floor( BX / 4 )
SHRQ $2, CX
JZ axpy_tail_start // if CX == 0 { goto axpy_tail_start }
axpy_tail4: // Loop unrolled 4x do {
MOVUPS (SI)(AX*4), X2 // X2 = x[i]
MULPS X0, X2 // X2 *= a
ADDPS (DX)(AX*4), X2 // X2 += y[i]
MOVUPS X2, (DI)(AX*4) // y[i] = X2
ADDQ $4, AX // i += 4
LOOP axpy_tail4 // } while --CX > 0
axpy_tail_start: // Reset loop counter for 1-wide tail loop
MOVQ BX, CX // CX = BX % 4
ANDQ $3, CX
JZ axpy_end // if CX == 0 { return }
axpy_tail:
MOVSS (SI)(AX*4), X1 // X1 = x[i]
MULSS X0, X1 // X1 *= a
ADDSS (DX)(AX*4), X1 // X1 += y[i]
MOVSS X1, (DI)(AX*4) // y[i] = X1
INCQ AX // i++
LOOP axpy_tail // } while --CX > 0
axpy_end:
RET

View File

@@ -0,0 +1,91 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// Register aliases for readability.
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R10
#define INC_Y R9
#define INCx3_Y R11
#define SUM X0
#define P_SUM X1
// func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64)
//
// DdotInc computes the dot product of strided float32 vectors,
// accumulating in float64:
//	for i := 0; i < int(n); i++ {
//		sum += float64(y[iy]) * float64(x[ix])
//		ix += incX
//		iy += incY
//	}
// SUM and P_SUM are two independent accumulators that break the
// addition dependency chain; they are combined after the unrolled loop.
TEXT ·DdotInc(SB), NOSPLIT, $0
	MOVQ x_base+0(FP), X_PTR  // X_PTR = &x
	MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
	MOVQ n+48(FP), LEN        // LEN = n
	PXOR SUM, SUM             // SUM = 0
	CMPQ LEN, $0
	JE dot_end                // if n == 0 { return 0 }
	MOVQ ix+72(FP), INC_X // INC_X = ix (index, reused as scratch)
	MOVQ iy+80(FP), INC_Y // INC_Y = iy
	LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix])
	LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy])
	MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(float32)
	SHLQ $2, INC_X
	MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(float32)
	SHLQ $2, INC_Y
	MOVQ LEN, TAIL
	ANDQ $3, TAIL // TAIL = LEN % 4
	SHRQ $2, LEN  // LEN = floor( LEN / 4 )
	JZ dot_tail   // if LEN == 0 { goto dot_tail }
	PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining
	LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
	LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3
dot_loop: // Loop unrolled 4x do {
	CVTSS2SD (X_PTR), X2 // X_i = float64(x[i])
	CVTSS2SD (X_PTR)(INC_X*1), X3
	CVTSS2SD (X_PTR)(INC_X*2), X4
	CVTSS2SD (X_PTR)(INCx3_X*1), X5
	CVTSS2SD (Y_PTR), X6 // X_j = float64(y[i])
	CVTSS2SD (Y_PTR)(INC_Y*1), X7
	CVTSS2SD (Y_PTR)(INC_Y*2), X8
	CVTSS2SD (Y_PTR)(INCx3_Y*1), X9
	MULSD X6, X2 // X_i *= X_j
	MULSD X7, X3
	MULSD X8, X4
	MULSD X9, X5
	ADDSD X2, SUM // SUM += X_i (alternating accumulators)
	ADDSD X3, P_SUM
	ADDSD X4, SUM
	ADDSD X5, P_SUM
	LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4])
	LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4])
	DECQ LEN
	JNZ dot_loop // } while --LEN > 0
	ADDSD P_SUM, SUM // SUM += P_SUM
	CMPQ TAIL, $0    // if TAIL == 0 { return }
	JE dot_end
dot_tail: // do { // one element per iteration
	CVTSS2SD (X_PTR), X2 // X2 = float64(x[i])
	CVTSS2SD (Y_PTR), X3 // X3 = float64(y[i])
	MULSD X3, X2         // X2 *= X3
	ADDSD X2, SUM        // SUM += X2
	ADDQ INC_X, X_PTR    // X_PTR += INC_X
	ADDQ INC_Y, Y_PTR    // Y_PTR += INC_Y
	DECQ TAIL
	JNZ dot_tail // } while --TAIL > 0
dot_end:
	MOVSD SUM, sum+88(FP) // return SUM
	RET

View File

@@ -0,0 +1,110 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define HADDPD_SUM_SUM LONG $0xC07C0F66 // @ HADDPD X0, X0
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define IDX AX
#define SUM X0
#define P_SUM X1
// func DdotUnitary(x, y []float32) (sum float32)
TEXT ·DdotUnitary(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) )
CMPQ y_len+32(FP), LEN
CMOVQLE y_len+32(FP), LEN
PXOR SUM, SUM // psum = 0
CMPQ LEN, $0
JE dot_end
XORQ IDX, IDX
MOVQ Y_PTR, DX
ANDQ $0xF, DX // Align on 16-byte boundary for ADDPS
JZ dot_no_trim // if DX == 0 { goto dot_no_trim }
SUBQ $16, DX
dot_align: // Trim first value(s) in unaligned buffer do {
CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i])
CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i])
MULSD X3, X2
ADDSD X2, SUM // SUM += X2
INCQ IDX // IDX++
DECQ LEN
JZ dot_end // if --TAIL == 0 { return }
ADDQ $4, DX
JNZ dot_align // } while --LEN > 0
dot_no_trim:
PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining
MOVQ LEN, TAIL
ANDQ $0x7, TAIL // TAIL = LEN % 8
SHRQ $3, LEN // LEN = floor( LEN / 8 )
JZ dot_tail_start // if LEN == 0 { goto dot_tail_start }
dot_loop: // Loop unrolled 8x do {
CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1]
CVTPS2PD 8(X_PTR)(IDX*4), X3
CVTPS2PD 16(X_PTR)(IDX*4), X4
CVTPS2PD 24(X_PTR)(IDX*4), X5
CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1]
CVTPS2PD 8(Y_PTR)(IDX*4), X7
CVTPS2PD 16(Y_PTR)(IDX*4), X8
CVTPS2PD 24(Y_PTR)(IDX*4), X9
MULPD X6, X2 // X_i *= X_j
MULPD X7, X3
MULPD X8, X4
MULPD X9, X5
ADDPD X2, SUM // SUM += X_i
ADDPD X3, P_SUM
ADDPD X4, SUM
ADDPD X5, P_SUM
ADDQ $8, IDX // IDX += 8
DECQ LEN
JNZ dot_loop // } while --LEN > 0
ADDPD P_SUM, SUM // SUM += P_SUM
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dot_end
dot_tail_start:
MOVQ TAIL, LEN
SHRQ $1, LEN
JZ dot_tail_one
dot_tail_two:
CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1]
CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1]
MULPD X6, X2 // X_i *= X_j
ADDPD X2, SUM // SUM += X_i
ADDQ $2, IDX // IDX += 2
DECQ LEN
JNZ dot_tail_two // } while --LEN > 0
ANDQ $1, TAIL
JZ dot_end
dot_tail_one:
CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i])
CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i])
MULSD X3, X2 // X2 *= X3
ADDSD X2, SUM // SUM += X2
dot_end:
HADDPD_SUM_SUM // SUM = \sum{ SUM[i] }
MOVSD SUM, sum+48(FP) // return SUM
RET

6
vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go generated vendored Normal file
View File

@@ -0,0 +1,6 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package f32 provides float32 vector primitives.
package f32 // import "gonum.org/v1/gonum/internal/asm/f32"

View File

@@ -0,0 +1,85 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// Register aliases for readability.
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R10
#define INC_Y R9
#define INCx3_Y R11
#define SUM X0
#define P_SUM X1
// func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32)
//
// DotInc computes the float32 dot product of strided vectors:
//	for i := 0; i < int(n); i++ {
//		sum += y[iy] * x[ix]
//		ix += incX
//		iy += incY
//	}
// SUM and P_SUM are two independent accumulators that break the
// addition dependency chain; they are combined after the unrolled loop.
TEXT ·DotInc(SB), NOSPLIT, $0
	MOVQ x_base+0(FP), X_PTR  // X_PTR = &x
	MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
	PXOR SUM, SUM             // SUM = 0
	MOVQ n+48(FP), LEN        // LEN = n
	CMPQ LEN, $0
	JE dot_end                // if n == 0 { return 0 }
	MOVQ ix+72(FP), INC_X // INC_X = ix (index, reused as scratch)
	MOVQ iy+80(FP), INC_Y // INC_Y = iy
	LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix])
	LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy])
	MOVQ incX+56(FP), INC_X // INC_X := incX * sizeof(float32)
	SHLQ $2, INC_X
	MOVQ incY+64(FP), INC_Y // INC_Y := incY * sizeof(float32)
	SHLQ $2, INC_Y
	MOVQ LEN, TAIL
	ANDQ $0x3, TAIL // TAIL = LEN % 4
	SHRQ $2, LEN    // LEN = floor( LEN / 4 )
	JZ dot_tail     // if LEN == 0 { goto dot_tail }
	PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining
	LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
	LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3
dot_loop: // Loop unrolled 4x do {
	MOVSS (X_PTR), X2 // X_i = x[i]
	MOVSS (X_PTR)(INC_X*1), X3
	MOVSS (X_PTR)(INC_X*2), X4
	MOVSS (X_PTR)(INCx3_X*1), X5
	MULSS (Y_PTR), X2 // X_i *= y[i]
	MULSS (Y_PTR)(INC_Y*1), X3
	MULSS (Y_PTR)(INC_Y*2), X4
	MULSS (Y_PTR)(INCx3_Y*1), X5
	ADDSS X2, SUM // SUM += X_i (alternating accumulators)
	ADDSS X3, P_SUM
	ADDSS X4, SUM
	ADDSS X5, P_SUM
	LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4])
	LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4])
	DECQ LEN
	JNZ dot_loop // } while --LEN > 0
	ADDSS P_SUM, SUM // SUM += P_SUM
	CMPQ TAIL, $0    // if TAIL == 0 { return }
	JE dot_end
dot_tail: // do { // one element per iteration
	MOVSS (X_PTR), X2 // X2 = x[i]
	MULSS (Y_PTR), X2 // X2 *= y[i]
	ADDSS X2, SUM     // SUM += X2
	ADDQ INC_X, X_PTR // X_PTR += INC_X
	ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y
	DECQ TAIL
	JNZ dot_tail // } while --TAIL > 0
dot_end:
	MOVSS SUM, sum+88(FP) // return SUM
	RET

View File

@@ -0,0 +1,106 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define HADDPS_SUM_SUM LONG $0xC07C0FF2 // @ HADDPS X0, X0
#define X_PTR SI
#define Y_PTR DI
#define LEN CX
#define TAIL BX
#define IDX AX
#define SUM X0
#define P_SUM X1
// func DotUnitary(x, y []float32) (sum float32)
TEXT ·DotUnitary(SB), NOSPLIT, $0
MOVQ x_base+0(FP), X_PTR // X_PTR = &x
MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
PXOR SUM, SUM // SUM = 0
MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) )
CMPQ y_len+32(FP), LEN
CMOVQLE y_len+32(FP), LEN
CMPQ LEN, $0
JE dot_end
XORQ IDX, IDX
MOVQ Y_PTR, DX
ANDQ $0xF, DX // Align on 16-byte boundary for MULPS
JZ dot_no_trim // if DX == 0 { goto dot_no_trim }
SUBQ $16, DX
dot_align: // Trim first value(s) in unaligned buffer do {
MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i]
MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i]
ADDSS X2, SUM // SUM += X2
INCQ IDX // IDX++
DECQ LEN
JZ dot_end // if --TAIL == 0 { return }
ADDQ $4, DX
JNZ dot_align // } while --DX > 0
dot_no_trim:
PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining
MOVQ LEN, TAIL
ANDQ $0xF, TAIL // TAIL = LEN % 16
SHRQ $4, LEN // LEN = floor( LEN / 16 )
JZ dot_tail4_start // if LEN == 0 { goto dot_tail4_start }
dot_loop: // Loop unrolled 16x do {
MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1]
MOVUPS 16(X_PTR)(IDX*4), X3
MOVUPS 32(X_PTR)(IDX*4), X4
MOVUPS 48(X_PTR)(IDX*4), X5
MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1]
MULPS 16(Y_PTR)(IDX*4), X3
MULPS 32(Y_PTR)(IDX*4), X4
MULPS 48(Y_PTR)(IDX*4), X5
ADDPS X2, SUM // SUM += X_i
ADDPS X3, P_SUM
ADDPS X4, SUM
ADDPS X5, P_SUM
ADDQ $16, IDX // IDX += 16
DECQ LEN
JNZ dot_loop // } while --LEN > 0
ADDPS P_SUM, SUM // SUM += P_SUM
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE dot_end
dot_tail4_start: // Reset loop counter for 4-wide tail loop
MOVQ TAIL, LEN // LEN = floor( TAIL / 4 )
SHRQ $2, LEN
JZ dot_tail_start // if LEN == 0 { goto dot_tail_start }
dot_tail4_loop: // Loop unrolled 4x do {
MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1]
MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1]
ADDPS X2, SUM // SUM += X_i
ADDQ $4, IDX // i += 4
DECQ LEN
JNZ dot_tail4_loop // } while --LEN > 0
dot_tail_start: // Reset loop counter for 1-wide tail loop
ANDQ $3, TAIL // TAIL = TAIL % 4
JZ dot_end // if TAIL == 0 { return }
dot_tail: // do {
MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i]
MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i]
ADDSS X2, SUM // psum += X2
INCQ IDX // IDX++
DECQ TAIL
JNZ dot_tail // } while --TAIL > 0
dot_end:
HADDPS_SUM_SUM // SUM = \sum{ SUM[i] }
HADDPS_SUM_SUM
MOVSS SUM, sum+48(FP) // return SUM
RET

15
vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go generated vendored Normal file
View File

@@ -0,0 +1,15 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
package f32
// Ger performs the rank-one operation
//	A += alpha * x * y^T
// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar.
//
// The body-less declaration is satisfied by an assembly implementation.
func Ger(m, n uintptr, alpha float32,
	x []float32, incX uintptr,
	y []float32, incY uintptr,
	a []float32, lda uintptr)

757
vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,757 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define SIZE 4
#define BITSIZE 2
#define KERNELSIZE 3
#define M_DIM m+0(FP)
#define M CX
#define N_DIM n+8(FP)
#define N BX
#define TMP1 R14
#define TMP2 R15
#define X_PTR SI
#define Y y_base+56(FP)
#define Y_PTR DX
#define A_ROW AX
#define A_PTR DI
#define INC_X R8
#define INC3_X R9
#define INC_Y R10
#define INC3_Y R11
#define LDA R12
#define LDA3 R13
#define ALPHA X0
#define ALPHA_SPILL al-16(SP)
#define LOAD_ALPHA \
MOVSS alpha+16(FP), ALPHA \
SHUFPS $0, ALPHA, ALPHA
#define LOAD_SCALED4 \
PREFETCHNTA 16*SIZE(X_PTR) \
MOVDDUP (X_PTR), X1 \
MOVDDUP 2*SIZE(X_PTR), X3 \
MOVSHDUP X1, X2 \
MOVSHDUP X3, X4 \
MOVSLDUP X1, X1 \
MOVSLDUP X3, X3 \
MULPS ALPHA, X1 \
MULPS ALPHA, X2 \
MULPS ALPHA, X3 \
MULPS ALPHA, X4
#define LOAD_SCALED2 \
MOVDDUP (X_PTR), X1 \
MOVSHDUP X1, X2 \
MOVSLDUP X1, X1 \
MULPS ALPHA, X1 \
MULPS ALPHA, X2
#define LOAD_SCALED1 \
MOVSS (X_PTR), X1 \
SHUFPS $0, X1, X1 \
MULPS ALPHA, X1
#define LOAD_SCALED4_INC \
PREFETCHNTA (X_PTR)(INC_X*8) \
MOVSS (X_PTR), X1 \
MOVSS (X_PTR)(INC_X*1), X2 \
MOVSS (X_PTR)(INC_X*2), X3 \
MOVSS (X_PTR)(INC3_X*1), X4 \
SHUFPS $0, X1, X1 \
SHUFPS $0, X2, X2 \
SHUFPS $0, X3, X3 \
SHUFPS $0, X4, X4 \
MULPS ALPHA, X1 \
MULPS ALPHA, X2 \
MULPS ALPHA, X3 \
MULPS ALPHA, X4
#define LOAD_SCALED2_INC \
MOVSS (X_PTR), X1 \
MOVSS (X_PTR)(INC_X*1), X2 \
SHUFPS $0, X1, X1 \
SHUFPS $0, X2, X2 \
MULPS ALPHA, X1 \
MULPS ALPHA, X2
#define KERNEL_LOAD8 \
MOVUPS (Y_PTR), X5 \
MOVUPS 4*SIZE(Y_PTR), X6
#define KERNEL_LOAD8_INC \
MOVSS (Y_PTR), X5 \
MOVSS (Y_PTR)(INC_Y*1), X6 \
MOVSS (Y_PTR)(INC_Y*2), X7 \
MOVSS (Y_PTR)(INC3_Y*1), X8 \
UNPCKLPS X6, X5 \
UNPCKLPS X8, X7 \
MOVLHPS X7, X5 \
LEAQ (Y_PTR)(INC_Y*4), Y_PTR \
MOVSS (Y_PTR), X6 \
MOVSS (Y_PTR)(INC_Y*1), X7 \
MOVSS (Y_PTR)(INC_Y*2), X8 \
MOVSS (Y_PTR)(INC3_Y*1), X9 \
UNPCKLPS X7, X6 \
UNPCKLPS X9, X8 \
MOVLHPS X8, X6
#define KERNEL_LOAD4 \
MOVUPS (Y_PTR), X5
#define KERNEL_LOAD4_INC \
MOVSS (Y_PTR), X5 \
MOVSS (Y_PTR)(INC_Y*1), X6 \
MOVSS (Y_PTR)(INC_Y*2), X7 \
MOVSS (Y_PTR)(INC3_Y*1), X8 \
UNPCKLPS X6, X5 \
UNPCKLPS X8, X7 \
MOVLHPS X7, X5
#define KERNEL_LOAD2 \
MOVSD (Y_PTR), X5
#define KERNEL_LOAD2_INC \
MOVSS (Y_PTR), X5 \
MOVSS (Y_PTR)(INC_Y*1), X6 \
UNPCKLPS X6, X5
#define KERNEL_4x8 \
MOVUPS X5, X7 \
MOVUPS X6, X8 \
MOVUPS X5, X9 \
MOVUPS X6, X10 \
MOVUPS X5, X11 \
MOVUPS X6, X12 \
MULPS X1, X5 \
MULPS X1, X6 \
MULPS X2, X7 \
MULPS X2, X8 \
MULPS X3, X9 \
MULPS X3, X10 \
MULPS X4, X11 \
MULPS X4, X12
#define STORE_4x8 \
MOVUPS ALPHA, ALPHA_SPILL \
MOVUPS (A_PTR), X13 \
ADDPS X13, X5 \
MOVUPS 4*SIZE(A_PTR), X14 \
ADDPS X14, X6 \
MOVUPS (A_PTR)(LDA*1), X15 \
ADDPS X15, X7 \
MOVUPS 4*SIZE(A_PTR)(LDA*1), X0 \
ADDPS X0, X8 \
MOVUPS (A_PTR)(LDA*2), X13 \
ADDPS X13, X9 \
MOVUPS 4*SIZE(A_PTR)(LDA*2), X14 \
ADDPS X14, X10 \
MOVUPS (A_PTR)(LDA3*1), X15 \
ADDPS X15, X11 \
MOVUPS 4*SIZE(A_PTR)(LDA3*1), X0 \
ADDPS X0, X12 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, 4*SIZE(A_PTR) \
MOVUPS X7, (A_PTR)(LDA*1) \
MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \
MOVUPS X9, (A_PTR)(LDA*2) \
MOVUPS X10, 4*SIZE(A_PTR)(LDA*2) \
MOVUPS X11, (A_PTR)(LDA3*1) \
MOVUPS X12, 4*SIZE(A_PTR)(LDA3*1) \
MOVUPS ALPHA_SPILL, ALPHA \
ADDQ $8*SIZE, A_PTR
#define KERNEL_4x4 \
MOVUPS X5, X6 \
MOVUPS X5, X7 \
MOVUPS X5, X8 \
MULPS X1, X5 \
MULPS X2, X6 \
MULPS X3, X7 \
MULPS X4, X8
#define STORE_4x4 \
MOVUPS (A_PTR), X13 \
ADDPS X13, X5 \
MOVUPS (A_PTR)(LDA*1), X14 \
ADDPS X14, X6 \
MOVUPS (A_PTR)(LDA*2), X15 \
ADDPS X15, X7 \
MOVUPS (A_PTR)(LDA3*1), X13 \
ADDPS X13, X8 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, (A_PTR)(LDA*1) \
MOVUPS X7, (A_PTR)(LDA*2) \
MOVUPS X8, (A_PTR)(LDA3*1) \
ADDQ $4*SIZE, A_PTR
#define KERNEL_4x2 \
MOVUPS X5, X6 \
MOVUPS X5, X7 \
MOVUPS X5, X8 \
MULPS X1, X5 \
MULPS X2, X6 \
MULPS X3, X7 \
MULPS X4, X8
#define STORE_4x2 \
MOVSD (A_PTR), X9 \
ADDPS X9, X5 \
MOVSD (A_PTR)(LDA*1), X10 \
ADDPS X10, X6 \
MOVSD (A_PTR)(LDA*2), X11 \
ADDPS X11, X7 \
MOVSD (A_PTR)(LDA3*1), X12 \
ADDPS X12, X8 \
MOVSD X5, (A_PTR) \
MOVSD X6, (A_PTR)(LDA*1) \
MOVSD X7, (A_PTR)(LDA*2) \
MOVSD X8, (A_PTR)(LDA3*1) \
ADDQ $2*SIZE, A_PTR
#define KERNEL_4x1 \
MOVSS (Y_PTR), X5 \
MOVSS X5, X6 \
MOVSS X5, X7 \
MOVSS X5, X8 \
MULSS X1, X5 \
MULSS X2, X6 \
MULSS X3, X7 \
MULSS X4, X8
#define STORE_4x1 \
ADDSS (A_PTR), X5 \
ADDSS (A_PTR)(LDA*1), X6 \
ADDSS (A_PTR)(LDA*2), X7 \
ADDSS (A_PTR)(LDA3*1), X8 \
MOVSS X5, (A_PTR) \
MOVSS X6, (A_PTR)(LDA*1) \
MOVSS X7, (A_PTR)(LDA*2) \
MOVSS X8, (A_PTR)(LDA3*1) \
ADDQ $SIZE, A_PTR
#define KERNEL_2x8 \
MOVUPS X5, X7 \
MOVUPS X6, X8 \
MULPS X1, X5 \
MULPS X1, X6 \
MULPS X2, X7 \
MULPS X2, X8
#define STORE_2x8 \
MOVUPS (A_PTR), X9 \
ADDPS X9, X5 \
MOVUPS 4*SIZE(A_PTR), X10 \
ADDPS X10, X6 \
MOVUPS (A_PTR)(LDA*1), X11 \
ADDPS X11, X7 \
MOVUPS 4*SIZE(A_PTR)(LDA*1), X12 \
ADDPS X12, X8 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, 4*SIZE(A_PTR) \
MOVUPS X7, (A_PTR)(LDA*1) \
MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \
ADDQ $8*SIZE, A_PTR
#define KERNEL_2x4 \
MOVUPS X5, X6 \
MULPS X1, X5 \
MULPS X2, X6
#define STORE_2x4 \
MOVUPS (A_PTR), X9 \
ADDPS X9, X5 \
MOVUPS (A_PTR)(LDA*1), X11 \
ADDPS X11, X6 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, (A_PTR)(LDA*1) \
ADDQ $4*SIZE, A_PTR
#define KERNEL_2x2 \
MOVSD X5, X6 \
MULPS X1, X5 \
MULPS X2, X6
#define STORE_2x2 \
MOVSD (A_PTR), X7 \
ADDPS X7, X5 \
MOVSD (A_PTR)(LDA*1), X8 \
ADDPS X8, X6 \
MOVSD X5, (A_PTR) \
MOVSD X6, (A_PTR)(LDA*1) \
ADDQ $2*SIZE, A_PTR
#define KERNEL_2x1 \
MOVSS (Y_PTR), X5 \
MOVSS X5, X6 \
MULSS X1, X5 \
MULSS X2, X6
#define STORE_2x1 \
ADDSS (A_PTR), X5 \
ADDSS (A_PTR)(LDA*1), X6 \
MOVSS X5, (A_PTR) \
MOVSS X6, (A_PTR)(LDA*1) \
ADDQ $SIZE, A_PTR
#define KERNEL_1x8 \
MULPS X1, X5 \
MULPS X1, X6
#define STORE_1x8 \
MOVUPS (A_PTR), X7 \
ADDPS X7, X5 \
MOVUPS 4*SIZE(A_PTR), X8 \
ADDPS X8, X6 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, 4*SIZE(A_PTR) \
ADDQ $8*SIZE, A_PTR
#define KERNEL_1x4 \
MULPS X1, X5 \
MULPS X1, X6
#define STORE_1x4 \
MOVUPS (A_PTR), X7 \
ADDPS X7, X5 \
MOVUPS X5, (A_PTR) \
ADDQ $4*SIZE, A_PTR
#define KERNEL_1x2 \
MULPS X1, X5
#define STORE_1x2 \
MOVSD (A_PTR), X6 \
ADDPS X6, X5 \
MOVSD X5, (A_PTR) \
ADDQ $2*SIZE, A_PTR
#define KERNEL_1x1 \
MOVSS (Y_PTR), X5 \
MULSS X1, X5
#define STORE_1x1 \
ADDSS (A_PTR), X5 \
MOVSS X5, (A_PTR) \
ADDQ $SIZE, A_PTR
// func Ger(m, n uintptr, alpha float32,
// x []float32, incX uintptr,
// y []float32, incY uintptr,
// a []float32, lda uintptr)
TEXT ·Ger(SB), 0, $16-120
MOVQ M_DIM, M
MOVQ N_DIM, N
CMPQ M, $0
JE end
CMPQ N, $0
JE end
LOAD_ALPHA
MOVQ x_base+24(FP), X_PTR
MOVQ y_base+56(FP), Y_PTR
MOVQ a_base+88(FP), A_ROW
MOVQ A_ROW, A_PTR
MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float32)
SHLQ $BITSIZE, LDA
LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3
CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path)
JNE inc
CMPQ incX+48(FP), $1 // Check for dense vector X (fast-path)
JNE inc
SHRQ $2, M
JZ r2
r4:
// LOAD 4
LOAD_SCALED4
MOVQ N_DIM, N
SHRQ $KERNELSIZE, N
JZ r4c4
r4c8:
// 4x8 KERNEL
KERNEL_LOAD8
KERNEL_4x8
STORE_4x8
ADDQ $8*SIZE, Y_PTR
DECQ N
JNZ r4c8
r4c4:
TESTQ $4, N_DIM
JZ r4c2
// 4x4 KERNEL
KERNEL_LOAD4
KERNEL_4x4
STORE_4x4
ADDQ $4*SIZE, Y_PTR
r4c2:
TESTQ $2, N_DIM
JZ r4c1
// 4x2 KERNEL
KERNEL_LOAD2
KERNEL_4x2
STORE_4x2
ADDQ $2*SIZE, Y_PTR
r4c1:
TESTQ $1, N_DIM
JZ r4end
// 4x1 KERNEL
KERNEL_4x1
STORE_4x1
ADDQ $SIZE, Y_PTR
r4end:
ADDQ $4*SIZE, X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*4), A_ROW
MOVQ A_ROW, A_PTR
DECQ M
JNZ r4
r2:
TESTQ $2, M_DIM
JZ r1
// LOAD 2
LOAD_SCALED2
MOVQ N_DIM, N
SHRQ $KERNELSIZE, N
JZ r2c4
r2c8:
// 2x8 KERNEL
KERNEL_LOAD8
KERNEL_2x8
STORE_2x8
ADDQ $8*SIZE, Y_PTR
DECQ N
JNZ r2c8
r2c4:
TESTQ $4, N_DIM
JZ r2c2
// 2x4 KERNEL
KERNEL_LOAD4
KERNEL_2x4
STORE_2x4
ADDQ $4*SIZE, Y_PTR
r2c2:
TESTQ $2, N_DIM
JZ r2c1
// 2x2 KERNEL
KERNEL_LOAD2
KERNEL_2x2
STORE_2x2
ADDQ $2*SIZE, Y_PTR
r2c1:
TESTQ $1, N_DIM
JZ r2end
// 2x1 KERNEL
KERNEL_2x1
STORE_2x1
ADDQ $SIZE, Y_PTR
r2end:
ADDQ $2*SIZE, X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*2), A_ROW
MOVQ A_ROW, A_PTR
r1:
TESTQ $1, M_DIM
JZ end
// LOAD 1
LOAD_SCALED1
MOVQ N_DIM, N
SHRQ $KERNELSIZE, N
JZ r1c4
r1c8:
// 1x8 KERNEL
KERNEL_LOAD8
KERNEL_1x8
STORE_1x8
ADDQ $8*SIZE, Y_PTR
DECQ N
JNZ r1c8
r1c4:
TESTQ $4, N_DIM
JZ r1c2
// 1x4 KERNEL
KERNEL_LOAD4
KERNEL_1x4
STORE_1x4
ADDQ $4*SIZE, Y_PTR
r1c2:
TESTQ $2, N_DIM
JZ r1c1
// 1x2 KERNEL
KERNEL_LOAD2
KERNEL_1x2
STORE_1x2
ADDQ $2*SIZE, Y_PTR
r1c1:
TESTQ $1, N_DIM
JZ end
// 1x1 KERNEL
KERNEL_1x1
STORE_1x1
end:
RET
inc: // Algorithm for incY != 0 ( split loads in kernel )
MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float32)
SHLQ $BITSIZE, INC_X
MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float32)
SHLQ $BITSIZE, INC_Y
LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3
LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3
XORQ TMP2, TMP2
MOVQ M, TMP1
SUBQ $1, TMP1
IMULQ INC_X, TMP1
NEGQ TMP1
CMPQ INC_X, $0
CMOVQLT TMP1, TMP2
LEAQ (X_PTR)(TMP2*SIZE), X_PTR
XORQ TMP2, TMP2
MOVQ N, TMP1
SUBQ $1, TMP1
IMULQ INC_Y, TMP1
NEGQ TMP1
CMPQ INC_Y, $0
CMOVQLT TMP1, TMP2
LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR
SHRQ $2, M
JZ inc_r2
inc_r4:
// LOAD 4
LOAD_SCALED4_INC
MOVQ N_DIM, N
SHRQ $KERNELSIZE, N
JZ inc_r4c4
inc_r4c8:
// 4x4 KERNEL
KERNEL_LOAD8_INC
KERNEL_4x8
STORE_4x8
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ N
JNZ inc_r4c8
inc_r4c4:
TESTQ $4, N_DIM
JZ inc_r4c2
// 4x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_4x4
STORE_4x4
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
inc_r4c2:
TESTQ $2, N_DIM
JZ inc_r4c1
// 4x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_4x2
STORE_4x2
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_r4c1:
TESTQ $1, N_DIM
JZ inc_r4end
// 4x1 KERNEL
KERNEL_4x1
STORE_4x1
ADDQ INC_Y, Y_PTR
inc_r4end:
LEAQ (X_PTR)(INC_X*4), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*4), A_ROW
MOVQ A_ROW, A_PTR
DECQ M
JNZ inc_r4
inc_r2:
TESTQ $2, M_DIM
JZ inc_r1
// LOAD 2
LOAD_SCALED2_INC
MOVQ N_DIM, N
SHRQ $KERNELSIZE, N
JZ inc_r2c4
inc_r2c8:
// 2x8 KERNEL
KERNEL_LOAD8_INC
KERNEL_2x8
STORE_2x8
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ N
JNZ inc_r2c8
inc_r2c4:
TESTQ $4, N_DIM
JZ inc_r2c2
// 2x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_2x4
STORE_2x4
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
inc_r2c2:
TESTQ $2, N_DIM
JZ inc_r2c1
// 2x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_2x2
STORE_2x2
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_r2c1:
TESTQ $1, N_DIM
JZ inc_r2end
// 2x1 KERNEL
KERNEL_2x1
STORE_2x1
ADDQ INC_Y, Y_PTR
inc_r2end:
LEAQ (X_PTR)(INC_X*2), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*2), A_ROW
MOVQ A_ROW, A_PTR
inc_r1:
TESTQ $1, M_DIM
JZ end
// LOAD 1
LOAD_SCALED1
MOVQ N_DIM, N
SHRQ $KERNELSIZE, N
JZ inc_r1c4
inc_r1c8:
// 1x8 KERNEL
KERNEL_LOAD8_INC
KERNEL_1x8
STORE_1x8
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ N
JNZ inc_r1c8
inc_r1c4:
TESTQ $4, N_DIM
JZ inc_r1c2
// 1x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_1x4
STORE_1x4
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
inc_r1c2:
TESTQ $2, N_DIM
JZ inc_r1c1
// 1x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_1x2
STORE_1x2
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_r1c1:
TESTQ $1, N_DIM
JZ inc_end
// 1x1 KERNEL
KERNEL_1x1
STORE_1x1
inc_end:
RET

36
vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package f32
// Ger performs the rank-one operation
//  A += alpha * x * y^T
// where A is an m×n dense matrix stored in row-major order with row stride
// lda, x and y are vectors, and alpha is a scalar.
func Ger(m, n uintptr, alpha float32, x []float32, incX uintptr, y []float32, incY uintptr, a []float32, lda uintptr) {
	// Fast path: with unit strides each row update is a contiguous axpy.
	if incX == 1 && incY == 1 {
		x = x[:m]
		y = y[:n]
		for i, xv := range x {
			// Row i of A: a[i*lda : i*lda+n] += (alpha*x[i]) * y.
			AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n])
		}
		return
	}

	// Strided path. Per BLAS convention a negative increment walks the
	// vector from its far end, so kx/ky are the adjusted start offsets.
	var ky, kx uintptr
	if int(incY) < 0 {
		ky = uintptr(-int(n-1) * int(incY))
	}
	if int(incX) < 0 {
		kx = uintptr(-int(m-1) * int(incX))
	}
	ix := kx // current offset into x
	for i := 0; i < int(m); i++ {
		// Row i of A += (alpha*x[ix]) * y, stepping y by incY from ky.
		AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], uintptr(n), uintptr(incY), 1, uintptr(ky), 0)
		ix += incX
	}
}

55
vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go generated vendored Normal file
View File

@@ -0,0 +1,55 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package f32
// ScalUnitary scales every element of x in place by alpha:
//  for i := range x {
//  	x[i] *= alpha
//  }
func ScalUnitary(alpha float32, x []float32) {
	for i := 0; i < len(x); i++ {
		x[i] *= alpha
	}
}
// ScalUnitaryTo writes alpha*x into dst element-wise:
//  for i, v := range x {
//  	dst[i] = alpha * v
//  }
func ScalUnitaryTo(dst []float32, alpha float32, x []float32) {
	for i := 0; i < len(x); i++ {
		dst[i] = alpha * x[i]
	}
}
// ScalInc scales n elements of x in place by alpha, stepping the index by
// incX between elements:
//  var ix uintptr
//  for i := 0; i < int(n); i++ {
//  	x[ix] *= alpha
//  	ix += incX
//  }
func ScalInc(alpha float32, x []float32, n, incX uintptr) {
	var idx uintptr
	for rem := int(n); rem > 0; rem-- {
		x[idx] *= alpha
		idx += incX
	}
}
// ScalIncTo stores alpha*x into dst for n elements, stepping x by incX and
// dst by incDst:
//  var idst, ix uintptr
//  for i := 0; i < int(n); i++ {
//  	dst[idst] = alpha * x[ix]
//  	ix += incX
//  	idst += incDst
//  }
func ScalIncTo(dst []float32, incDst uintptr, alpha float32, x []float32, n, incX uintptr) {
	var di, xi uintptr
	for rem := int(n); rem > 0; rem-- {
		dst[di] = alpha * x[xi]
		xi += incX
		di += incDst
	}
}

View File

@@ -0,0 +1,68 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
package f32
// The declarations below have no Go bodies; they are implemented in the
// corresponding *_amd64.s assembly files in this package. The comment on
// each declaration is its reference (pure-Go) semantics.

// AxpyUnitary is
//  for i, v := range x {
//  	y[i] += alpha * v
//  }
func AxpyUnitary(alpha float32, x, y []float32)

// AxpyUnitaryTo is
//  for i, v := range x {
//  	dst[i] = alpha*v + y[i]
//  }
func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32)

// AxpyInc is
//  for i := 0; i < int(n); i++ {
//  	y[iy] += alpha * x[ix]
//  	ix += incX
//  	iy += incY
//  }
func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr)

// AxpyIncTo is
//  for i := 0; i < int(n); i++ {
//  	dst[idst] = alpha*x[ix] + y[iy]
//  	ix += incX
//  	iy += incY
//  	idst += incDst
//  }
func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr)

// DdotUnitary is
//  for i, v := range x {
//  	sum += float64(y[i]) * float64(v)
//  }
//  return
func DdotUnitary(x, y []float32) (sum float64)

// DdotInc is
//  for i := 0; i < int(n); i++ {
//  	sum += float64(y[iy]) * float64(x[ix])
//  	ix += incX
//  	iy += incY
//  }
//  return
func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64)

// DotUnitary is
//  for i, v := range x {
//  	sum += y[i] * v
//  }
//  return sum
func DotUnitary(x, y []float32) (sum float32)

// DotInc is
//  for i := 0; i < int(n); i++ {
//  	sum += y[iy] * x[ix]
//  	ix += incX
//  	iy += incY
//  }
//  return sum
func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32)

View File

@@ -0,0 +1,113 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package f32
// AxpyUnitary accumulates alpha*x into y element-wise over the length of x:
//  for i, v := range x {
//  	y[i] += alpha * v
//  }
func AxpyUnitary(alpha float32, x, y []float32) {
	for i := 0; i < len(x); i++ {
		y[i] += alpha * x[i]
	}
}
// AxpyUnitaryTo stores alpha*x + y into dst element-wise over the length of x:
//  for i, v := range x {
//  	dst[i] = alpha*v + y[i]
//  }
func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) {
	for i := 0; i < len(x); i++ {
		dst[i] = alpha*x[i] + y[i]
	}
}
// AxpyInc is the strided axpy: y[iy] += alpha*x[ix] for n elements, stepping
// x by incX and y by incY each iteration:
//  for i := 0; i < int(n); i++ {
//  	y[iy] += alpha * x[ix]
//  	ix += incX
//  	iy += incY
//  }
func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) {
	xi, yi := ix, iy
	for rem := int(n); rem > 0; rem-- {
		y[yi] += alpha * x[xi]
		xi += incX
		yi += incY
	}
}
// AxpyIncTo is the strided, out-of-place axpy: dst[idst] = alpha*x[ix] + y[iy]
// for n elements, stepping each vector by its own increment:
//  for i := 0; i < int(n); i++ {
//  	dst[idst] = alpha*x[ix] + y[iy]
//  	ix += incX
//  	iy += incY
//  	idst += incDst
//  }
func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) {
	xi, yi, di := ix, iy, idst
	for rem := int(n); rem > 0; rem-- {
		dst[di] = alpha*x[xi] + y[yi]
		xi += incX
		yi += incY
		di += incDst
	}
}
// DotUnitary returns the float32 dot product of x and y over the length of x:
//  for i, v := range x {
//  	sum += y[i] * v
//  }
//  return sum
func DotUnitary(x, y []float32) (sum float32) {
	for i := 0; i < len(x); i++ {
		sum += y[i] * x[i]
	}
	return sum
}
// DotInc returns the float32 dot product of n strided elements of x and y:
//  for i := 0; i < int(n); i++ {
//  	sum += y[iy] * x[ix]
//  	ix += incX
//  	iy += incY
//  }
//  return sum
func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) {
	xi, yi := ix, iy
	for rem := int(n); rem > 0; rem-- {
		sum += y[yi] * x[xi]
		xi += incX
		yi += incY
	}
	return sum
}
// DdotUnitary returns the dot product of x and y accumulated in float64 for
// extra precision:
//  for i, v := range x {
//  	sum += float64(y[i]) * float64(v)
//  }
//  return
func DdotUnitary(x, y []float32) (sum float64) {
	for i := 0; i < len(x); i++ {
		sum += float64(y[i]) * float64(x[i])
	}
	return
}
// DdotInc returns the strided dot product of x and y accumulated in float64
// for extra precision:
//  for i := 0; i < int(n); i++ {
//  	sum += float64(y[iy]) * float64(x[ix])
//  	ix += incX
//  	iy += incY
//  }
//  return
func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) {
	xi, yi := ix, iy
	for rem := int(n); rem > 0; rem-- {
		sum += float64(y[yi]) * float64(x[xi])
		xi += incX
		yi += incY
	}
	return
}

View File

@@ -0,0 +1,82 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func L1Norm(x []float64) float64
//
// Returns the sum of absolute values of x. |x[i]| is computed without a sign
// mask: positive partial sums (ADDPD) and negated partial sums (SUBPD) are
// kept in parallel and merged with MAXPD, since max(s+v, s-v) = s+|v|.
// Eight XMM accumulators hide ADDPD latency across the 8x-unrolled loop.
TEXT ·L1Norm(SB), NOSPLIT, $0
	MOVQ x_base+0(FP), SI // SI = &x
	MOVQ x_len+8(FP), CX  // CX = len(x)
	XORQ AX, AX           // i = 0
	PXOR X0, X0           // p_sum_i = 0
	PXOR X1, X1
	PXOR X2, X2
	PXOR X3, X3
	PXOR X4, X4
	PXOR X5, X5
	PXOR X6, X6
	PXOR X7, X7
	CMPQ CX, $0 // if CX == 0 { return 0 }
	JE   absum_end
	MOVQ CX, BX
	ANDQ $7, BX           // BX = len(x) % 8
	SHRQ $3, CX           // CX = floor( len(x) / 8 )
	JZ   absum_tail_start // if CX == 0 { goto absum_tail_start }

absum_loop: // do {
	// p_sum += max( p_sum + x[i], p_sum - x[i] )
	MOVUPS (SI)(AX*8), X8 // X_i = x[i:i+1]
	MOVUPS 16(SI)(AX*8), X9
	MOVUPS 32(SI)(AX*8), X10
	MOVUPS 48(SI)(AX*8), X11
	ADDPD  X8, X0 // p_sum_i += X_i  ( positive values )
	ADDPD  X9, X2
	ADDPD  X10, X4
	ADDPD  X11, X6
	SUBPD  X8, X1 // p_sum_(i+1) -= X_i  ( negative values )
	SUBPD  X9, X3
	SUBPD  X10, X5
	SUBPD  X11, X7
	MAXPD  X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) )
	MAXPD  X3, X2
	MAXPD  X5, X4
	MAXPD  X7, X6
	MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i
	MOVAPS X2, X3
	MOVAPS X4, X5
	MOVAPS X6, X7
	ADDQ   $8, AX     // i += 8
	LOOP   absum_loop // } while --CX > 0

	// p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) )
	ADDPD X3, X0
	ADDPD X5, X7
	ADDPD X7, X0

	// p_sum_0[0] = p_sum_0[0] + p_sum_0[1]
	MOVAPS X0, X1
	SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 )
	ADDSD  X1, X0
	CMPQ   BX, $0
	JE     absum_end // if BX == 0 { goto absum_end }

absum_tail_start: // Reset loop registers
	MOVQ  BX, CX // Loop counter: CX = BX
	XORPS X8, X8 // X_8 = 0

absum_tail: // do {
	// p_sum += max( p_sum + x[i], p_sum - x[i] )
	MOVSD (SI)(AX*8), X8 // X_8 = x[i]
	MOVSD X0, X1         // p_sum_1 = p_sum_0
	ADDSD X8, X0         // p_sum_0 += X_8
	SUBSD X8, X1         // p_sum_1 -= X_8
	MAXSD X1, X0         // p_sum_0 = max( p_sum_0, p_sum_1 )
	INCQ  AX             // i++
	LOOP  absum_tail     // } while --CX > 0

absum_end: // return p_sum_0
	MOVSD X0, sum+24(FP)
	RET

View File

@@ -0,0 +1,90 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func L1NormInc(x []float64, n, incX int) (sum float64)
//
// Strided variant of L1Norm: sums |x[i]| over n elements spaced incX apart.
// Uses the same max(s+v, s-v) trick as L1Norm; because elements are strided,
// vector lanes are assembled with scalar MOVSD (low lane) + MOVHPD (high
// lane) pairs instead of packed loads.
TEXT ·L1NormInc(SB), NOSPLIT, $0
	MOVQ x_base+0(FP), SI // SI = &x
	MOVQ n+24(FP), CX     // CX = n
	MOVQ incX+32(FP), AX  // AX = increment * sizeof( float64 )
	SHLQ $3, AX
	MOVQ AX, DX // DX = AX * 3
	IMULQ $3, DX
	PXOR X0, X0 // p_sum_i = 0
	PXOR X1, X1
	PXOR X2, X2
	PXOR X3, X3
	PXOR X4, X4
	PXOR X5, X5
	PXOR X6, X6
	PXOR X7, X7
	CMPQ CX, $0 // if CX == 0 { return 0 }
	JE   absum_end
	MOVQ CX, BX
	ANDQ $7, BX           // BX = n % 8
	SHRQ $3, CX           // CX = floor( n / 8 )
	JZ   absum_tail_start // if CX == 0 { goto absum_tail_start }

absum_loop: // do {
	// p_sum = max( p_sum + x[i], p_sum - x[i] )
	MOVSD  (SI), X8       // X_i[0] = x[i]
	MOVSD  (SI)(AX*1), X9
	MOVSD  (SI)(AX*2), X10
	MOVSD  (SI)(DX*1), X11
	LEAQ   (SI)(AX*4), SI // SI = SI + 4*incX (advance 4 strided elements)
	MOVHPD (SI), X8       // X_i[1] = x[i+4]
	MOVHPD (SI)(AX*1), X9
	MOVHPD (SI)(AX*2), X10
	MOVHPD (SI)(DX*1), X11
	ADDPD  X8, X0  // p_sum_i += X_i  ( positive values )
	ADDPD  X9, X2
	ADDPD  X10, X4
	ADDPD  X11, X6
	SUBPD  X8, X1  // p_sum_(i+1) -= X_i  ( negative values )
	SUBPD  X9, X3
	SUBPD  X10, X5
	SUBPD  X11, X7
	MAXPD  X1, X0  // p_sum_i = max( p_sum_i, p_sum_(i+1) )
	MAXPD  X3, X2
	MAXPD  X5, X4
	MAXPD  X7, X6
	MOVAPS X0, X1  // p_sum_(i+1) = p_sum_i
	MOVAPS X2, X3
	MOVAPS X4, X5
	MOVAPS X6, X7
	LEAQ   (SI)(AX*4), SI // SI = SI + 4*incX
	LOOP   absum_loop     // } while --CX > 0

	// p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) )
	ADDPD X3, X0
	ADDPD X5, X7
	ADDPD X7, X0

	// p_sum_0[0] = p_sum_0[0] + p_sum_0[1]
	MOVAPS X0, X1
	SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 )
	ADDSD  X1, X0
	CMPQ   BX, $0
	JE     absum_end // if BX == 0 { goto absum_end }

absum_tail_start: // Reset loop registers
	MOVQ  BX, CX // Loop counter: CX = BX
	XORPS X8, X8 // X_8 = 0

absum_tail: // do {
	// p_sum += max( p_sum + x[i], p_sum - x[i] )
	MOVSD (SI), X8   // X_8 = x[i]
	MOVSD X0, X1     // p_sum_1 = p_sum_0
	ADDSD X8, X0     // p_sum_0 += X_8
	SUBSD X8, X1     // p_sum_1 -= X_8
	MAXSD X1, X0     // p_sum_0 = max( p_sum_0, p_sum_1 )
	ADDQ  AX, SI     // i++ (advance one strided element)
	LOOP  absum_tail // } while --CX > 0

absum_end: // return p_sum_0
	MOVSD X0, sum+40(FP)
	RET

66
vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,66 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func Add(dst, s []float64)
//
// dst[i] += s[i] element-wise over min(len(dst), len(s)) elements.
// Processes a single leading element if dst is not 16-byte aligned, then an
// 8x-unrolled packed loop, then a scalar tail.
TEXT ·Add(SB), NOSPLIT, $0
	MOVQ    dst_base+0(FP), DI // DI = &dst
	MOVQ    dst_len+8(FP), CX  // CX = len(dst)
	MOVQ    s_base+24(FP), SI  // SI = &s
	CMPQ    s_len+32(FP), CX   // CX = min( CX, len(s) ) — CMOVQLE keeps the smaller
	CMOVQLE s_len+32(FP), CX
	CMPQ    CX, $0 // if CX == 0 { return }
	JE      add_end
	XORQ    AX, AX
	MOVQ    DI, BX
	ANDQ    $0x0F, BX   // BX = &dst & 15
	JZ      add_no_trim // if BX == 0 { goto add_no_trim }

	// Align on 16-byte boundary
	MOVSD (SI)(AX*8), X0 // X0 = s[i]
	ADDSD (DI)(AX*8), X0 // X0 += dst[i]
	MOVSD X0, (DI)(AX*8) // dst[i] = X0
	INCQ  AX             // i++
	DECQ  CX             // --CX
	JE    add_end        // if CX == 0 { return }

add_no_trim:
	MOVQ CX, BX
	ANDQ $7, BX         // BX = len(dst) % 8
	SHRQ $3, CX         // CX = floor( len(dst) / 8 )
	JZ   add_tail_start // if CX == 0 { goto add_tail_start }

add_loop: // Loop unrolled 8x  do {
	MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1]
	MOVUPS 16(SI)(AX*8), X1
	MOVUPS 32(SI)(AX*8), X2
	MOVUPS 48(SI)(AX*8), X3
	ADDPD  (DI)(AX*8), X0 // X_i += dst[i:i+1]
	ADDPD  16(DI)(AX*8), X1
	ADDPD  32(DI)(AX*8), X2
	ADDPD  48(DI)(AX*8), X3
	MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X_i
	MOVUPS X1, 16(DI)(AX*8)
	MOVUPS X2, 32(DI)(AX*8)
	MOVUPS X3, 48(DI)(AX*8)
	ADDQ   $8, AX   // i += 8
	LOOP   add_loop // } while --CX > 0
	CMPQ   BX, $0   // if BX == 0 { return }
	JE     add_end

add_tail_start: // Reset loop registers
	MOVQ BX, CX // Loop counter: CX = BX

add_tail: // do {
	MOVSD (SI)(AX*8), X0 // X0 = s[i]
	ADDSD (DI)(AX*8), X0 // X0 += dst[i]
	MOVSD X0, (DI)(AX*8) // dst[i] = X0
	INCQ  AX             // ++i
	LOOP  add_tail       // } while --CX > 0

add_end:
	RET

View File

@@ -0,0 +1,53 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func AddConst(alpha float64, x []float64)
//
// x[i] += alpha element-wise, in place. Broadcasts alpha into both lanes of
// X4 (and a copy in X5 for pipelining), runs an 8x-unrolled packed loop,
// then a scalar tail.
TEXT ·AddConst(SB), NOSPLIT, $0
	MOVQ   x_base+8(FP), SI // SI = &x
	MOVQ   x_len+16(FP), CX // CX = len(x)
	CMPQ   CX, $0           // if len(x) == 0 { return }
	JE     ac_end
	MOVSD  alpha+0(FP), X4 // X4 = { a, a }
	SHUFPD $0, X4, X4
	MOVUPS X4, X5 // X5 = X4
	XORQ   AX, AX // i = 0
	MOVQ   CX, BX
	ANDQ   $7, BX        // BX = len(x) % 8
	SHRQ   $3, CX        // CX = floor( len(x) / 8 )
	JZ     ac_tail_start // if CX == 0 { goto ac_tail_start }

ac_loop: // Loop unrolled 8x  do {
	MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1]
	MOVUPS 16(SI)(AX*8), X1
	MOVUPS 32(SI)(AX*8), X2
	MOVUPS 48(SI)(AX*8), X3
	ADDPD  X4, X0 // X_i += a
	ADDPD  X5, X1
	ADDPD  X4, X2
	ADDPD  X5, X3
	MOVUPS X0, (SI)(AX*8) // s[i:i+1] = X_i
	MOVUPS X1, 16(SI)(AX*8)
	MOVUPS X2, 32(SI)(AX*8)
	MOVUPS X3, 48(SI)(AX*8)
	ADDQ   $8, AX  // i += 8
	LOOP   ac_loop // } while --CX > 0
	CMPQ   BX, $0  // if BX == 0 { return }
	JE     ac_end

ac_tail_start: // Reset loop counters
	MOVQ BX, CX // Loop counter: CX = BX

ac_tail: // do {
	MOVSD (SI)(AX*8), X0 // X0 = s[i]
	ADDSD X4, X0         // X0 += a
	MOVSD X0, (SI)(AX*8) // s[i] = X0
	INCQ  AX             // ++i
	LOOP  ac_tail        // } while --CX > 0

ac_end:
	RET

57
vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go generated vendored Normal file
View File

@@ -0,0 +1,57 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package f64
// AxpyUnitary accumulates alpha*x into y element-wise over the length of x:
//  for i, v := range x {
//  	y[i] += alpha * v
//  }
func AxpyUnitary(alpha float64, x, y []float64) {
	for i := 0; i < len(x); i++ {
		y[i] += alpha * x[i]
	}
}
// AxpyUnitaryTo stores alpha*x + y into dst element-wise over the length of x:
//  for i, v := range x {
//  	dst[i] = alpha*v + y[i]
//  }
func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) {
	for i := 0; i < len(x); i++ {
		dst[i] = alpha*x[i] + y[i]
	}
}
// AxpyInc is the strided axpy: y[iy] += alpha*x[ix] for n elements, stepping
// x by incX and y by incY each iteration:
//  for i := 0; i < int(n); i++ {
//  	y[iy] += alpha * x[ix]
//  	ix += incX
//  	iy += incY
//  }
func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) {
	xi, yi := ix, iy
	for rem := int(n); rem > 0; rem-- {
		y[yi] += alpha * x[xi]
		xi += incX
		yi += incY
	}
}
// AxpyIncTo is the strided, out-of-place axpy: dst[idst] = alpha*x[ix] + y[iy]
// for n elements, stepping each vector by its own increment:
//  for i := 0; i < int(n); i++ {
//  	dst[idst] = alpha*x[ix] + y[iy]
//  	ix += incX
//  	iy += incY
//  	idst += incDst
//  }
func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) {
	xi, yi, di := ix, iy, idst
	for rem := int(n); rem > 0; rem-- {
		dst[di] = alpha*x[xi] + y[yi]
		xi += incX
		yi += incY
		di += incDst
	}
}

View File

@@ -0,0 +1,142 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define X_PTR SI
#define Y_PTR DI
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R11
#define INC_Y R9
#define INCx3_Y R12
#define INC_DST R9
#define INCx3_DST R12
#define ALPHA X0
#define ALPHA_2 X1
// func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr)
//
// Strided y[i] += alpha * x[i], unrolled 4x with a 2x/1x tail.
// NOTE: DST_PTR and INC_DST alias Y_PTR/INC_Y (see #defines above), so
// advancing Y_PTR also advances the store pointer.
TEXT ·AxpyInc(SB), NOSPLIT, $0
	MOVQ x_base+8(FP), X_PTR  // X_PTR = &x
	MOVQ y_base+32(FP), Y_PTR // Y_PTR = &y
	MOVQ n+56(FP), LEN        // LEN = n
	CMPQ LEN, $0              // if LEN == 0 { return }
	JE   end
	MOVQ ix+80(FP), INC_X
	MOVQ iy+88(FP), INC_Y
	LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix])
	LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy])
	MOVQ Y_PTR, DST_PTR          // DST_PTR = Y_PTR  // Write pointer
	MOVQ incX+64(FP), INC_X      // INC_X = incX * sizeof(float64)
	SHLQ $3, INC_X
	MOVQ incY+72(FP), INC_Y      // INC_Y = incY * sizeof(float64)
	SHLQ $3, INC_Y
	MOVSD alpha+0(FP), ALPHA     // ALPHA = alpha
	MOVQ LEN, TAIL
	ANDQ $3, TAIL   // TAIL = n % 4
	SHRQ $2, LEN    // LEN = floor( n / 4 )
	JZ   tail_start // if LEN == 0 { goto tail_start }
	MOVAPS ALPHA, ALPHA_2          // ALPHA_2 = ALPHA for pipelining
	LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
	LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3

loop: // do {  // y[i] += alpha * x[i] unrolled 4x.
	MOVSD (X_PTR), X2 // X_i = x[i]
	MOVSD (X_PTR)(INC_X*1), X3
	MOVSD (X_PTR)(INC_X*2), X4
	MOVSD (X_PTR)(INCx3_X*1), X5
	MULSD ALPHA, X2 // X_i *= a
	MULSD ALPHA_2, X3
	MULSD ALPHA, X4
	MULSD ALPHA_2, X5
	ADDSD (Y_PTR), X2 // X_i += y[i]
	ADDSD (Y_PTR)(INC_Y*1), X3
	ADDSD (Y_PTR)(INC_Y*2), X4
	ADDSD (Y_PTR)(INCx3_Y*1), X5
	MOVSD X2, (DST_PTR) // y[i] = X_i
	MOVSD X3, (DST_PTR)(INC_DST*1)
	MOVSD X4, (DST_PTR)(INC_DST*2)
	MOVSD X5, (DST_PTR)(INCx3_DST*1)
	LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
	LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4])
	DECQ LEN
	JNZ  loop     // } while --LEN > 0
	CMPQ TAIL, $0 // if TAIL == 0 { return }
	JE   end

tail_start: // Reset Loop registers
	MOVQ TAIL, LEN // Loop counter: LEN = TAIL
	SHRQ $1, LEN   // LEN = floor( LEN / 2 )
	JZ   tail_one

tail_two:
	MOVSD (X_PTR), X2 // X_i = x[i]
	MOVSD (X_PTR)(INC_X*1), X3
	MULSD ALPHA, X2 // X_i *= a
	MULSD ALPHA, X3
	ADDSD (Y_PTR), X2 // X_i += y[i]
	ADDSD (Y_PTR)(INC_Y*1), X3
	MOVSD X2, (DST_PTR) // y[i] = X_i
	MOVSD X3, (DST_PTR)(INC_DST*1)
	LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2])
	LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2])
	ANDQ $1, TAIL
	JZ   end // if TAIL == 0 { goto end }

tail_one:
	// y[i] += alpha * x[i] for the last n % 2 iteration (at most one).
	MOVSD (X_PTR), X2   // X2 = x[i]
	MULSD ALPHA, X2     // X2 *= a
	ADDSD (Y_PTR), X2   // X2 += y[i]
	MOVSD X2, (DST_PTR) // y[i] = X2

end:
	RET

View File

@@ -0,0 +1,148 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define X_PTR SI
#define Y_PTR DI
#define DST_PTR DX
#define IDX AX
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R11
#define INC_Y R9
#define INCx3_Y R12
#define INC_DST R10
#define INCx3_DST R13
#define ALPHA X0
#define ALPHA_2 X1
// func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr)
//
// Strided, out-of-place dst[i] = alpha*x[i] + y[i], unrolled 4x with a
// 2x/1x tail. Unlike AxpyInc, dst has its own pointer and increment.
TEXT ·AxpyIncTo(SB), NOSPLIT, $0
	MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst
	MOVQ x_base+48(FP), X_PTR    // X_PTR := &x
	MOVQ y_base+72(FP), Y_PTR    // Y_PTR := &y
	MOVQ n+96(FP), LEN           // LEN := n
	CMPQ LEN, $0                 // if LEN == 0 { return }
	JE   end
	MOVQ ix+120(FP), INC_X
	LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix])
	MOVQ iy+128(FP), INC_Y
	LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy])
	MOVQ idst+32(FP), INC_DST
	LEAQ (DST_PTR)(INC_DST*8), DST_PTR // DST_PTR = &(dst[idst])
	MOVQ incX+104(FP), INC_X           // INC_X = incX * sizeof(float64)
	SHLQ $3, INC_X
	MOVQ incY+112(FP), INC_Y           // INC_Y = incY * sizeof(float64)
	SHLQ $3, INC_Y
	MOVQ incDst+24(FP), INC_DST        // INC_DST = incDst * sizeof(float64)
	SHLQ $3, INC_DST
	MOVSD alpha+40(FP), ALPHA
	MOVQ LEN, TAIL
	ANDQ $3, TAIL   // TAIL = n % 4
	SHRQ $2, LEN    // LEN = floor( n / 4 )
	JZ   tail_start // if LEN == 0 { goto tail_start }
	MOVSD ALPHA, ALPHA_2                 // ALPHA_2 = ALPHA for pipelining
	LEAQ (INC_X)(INC_X*2), INCx3_X       // INCx3_X = INC_X * 3
	LEAQ (INC_Y)(INC_Y*2), INCx3_Y       // INCx3_Y = INC_Y * 3
	LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3

loop: // do {  // dst[i] = alpha * x[i] + y[i] unrolled 4x.
	MOVSD (X_PTR), X2 // X_i = x[i]
	MOVSD (X_PTR)(INC_X*1), X3
	MOVSD (X_PTR)(INC_X*2), X4
	MOVSD (X_PTR)(INCx3_X*1), X5
	MULSD ALPHA, X2 // X_i *= a
	MULSD ALPHA_2, X3
	MULSD ALPHA, X4
	MULSD ALPHA_2, X5
	ADDSD (Y_PTR), X2 // X_i += y[i]
	ADDSD (Y_PTR)(INC_Y*1), X3
	ADDSD (Y_PTR)(INC_Y*2), X4
	ADDSD (Y_PTR)(INCx3_Y*1), X5
	MOVSD X2, (DST_PTR) // dst[i] = X_i
	MOVSD X3, (DST_PTR)(INC_DST*1)
	MOVSD X4, (DST_PTR)(INC_DST*2)
	MOVSD X5, (DST_PTR)(INCx3_DST*1)
	LEAQ (X_PTR)(INC_X*4), X_PTR       // X_PTR = &(X_PTR[incX*4])
	LEAQ (Y_PTR)(INC_Y*4), Y_PTR       // Y_PTR = &(Y_PTR[incY*4])
	LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4])
	DECQ LEN
	JNZ  loop     // } while --LEN > 0
	CMPQ TAIL, $0 // if TAIL == 0 { return }
	JE   end

tail_start: // Reset Loop registers
	MOVQ TAIL, LEN // Loop counter: LEN = TAIL
	SHRQ $1, LEN   // LEN = floor( LEN / 2 )
	JZ   tail_one

tail_two:
	MOVSD (X_PTR), X2 // X_i = x[i]
	MOVSD (X_PTR)(INC_X*1), X3
	MULSD ALPHA, X2 // X_i *= a
	MULSD ALPHA, X3
	ADDSD (Y_PTR), X2 // X_i += y[i]
	ADDSD (Y_PTR)(INC_Y*1), X3
	MOVSD X2, (DST_PTR) // dst[i] = X_i
	MOVSD X3, (DST_PTR)(INC_DST*1)
	LEAQ (X_PTR)(INC_X*2), X_PTR       // X_PTR = &(X_PTR[incX*2])
	LEAQ (Y_PTR)(INC_Y*2), Y_PTR       // Y_PTR = &(Y_PTR[incY*2])
	LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2])
	ANDQ $1, TAIL
	JZ   end // if TAIL == 0 { goto end }

tail_one:
	MOVSD (X_PTR), X2   // X2 = x[i]
	MULSD ALPHA, X2     // X2 *= a
	ADDSD (Y_PTR), X2   // X2 += y[i]
	MOVSD X2, (DST_PTR) // dst[i] = X2

end:
	RET

View File

@@ -0,0 +1,134 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define X_PTR SI
#define Y_PTR DI
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1
// func AxpyUnitary(alpha float64, x, y []float64)
//
// y[i] += alpha * x[i] over min(len(x), len(y)) elements. Handles one
// leading scalar element to reach 16-byte alignment of y, then an
// 8x-unrolled packed loop, then a 2x/1x scalar tail.
TEXT ·AxpyUnitary(SB), NOSPLIT, $0
	MOVQ    x_base+8(FP), X_PTR  // X_PTR := &x
	MOVQ    y_base+32(FP), Y_PTR // Y_PTR := &y
	MOVQ    x_len+16(FP), LEN    // LEN = min( len(x), len(y) )
	CMPQ    y_len+40(FP), LEN
	CMOVQLE y_len+40(FP), LEN
	CMPQ    LEN, $0 // if LEN == 0 { return }
	JE      end
	XORQ    IDX, IDX
	MOVSD   alpha+0(FP), ALPHA // ALPHA := { alpha, alpha }
	SHUFPD  $0, ALPHA, ALPHA
	MOVUPS  ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining
	MOVQ    Y_PTR, TAIL    // Check memory alignment
	ANDQ    $15, TAIL      // TAIL = &y % 16
	JZ      no_trim        // if TAIL == 0 { goto no_trim }

	// Align on 16-byte boundary
	MOVSD (X_PTR), X2 // X2 := x[0]
	MULSD ALPHA, X2   // X2 *= a
	ADDSD (Y_PTR), X2 // X2 += y[0]
	MOVSD X2, (DST_PTR) // y[0] = X2
	INCQ  IDX // i++
	DECQ  LEN // LEN--
	JZ    end // if LEN == 0 { return }

no_trim:
	MOVQ LEN, TAIL
	ANDQ $7, TAIL   // TAIL := n % 8
	SHRQ $3, LEN    // LEN = floor( n / 8 )
	JZ   tail_start // if LEN == 0 { goto tail_start }

loop: // do {
	// y[i] += alpha * x[i] unrolled 8x.
	MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
	MOVUPS 16(X_PTR)(IDX*8), X3
	MOVUPS 32(X_PTR)(IDX*8), X4
	MOVUPS 48(X_PTR)(IDX*8), X5
	MULPD  ALPHA, X2 // X_i *= a
	MULPD  ALPHA_2, X3
	MULPD  ALPHA, X4
	MULPD  ALPHA_2, X5
	ADDPD  (Y_PTR)(IDX*8), X2 // X_i += y[i]
	ADDPD  16(Y_PTR)(IDX*8), X3
	ADDPD  32(Y_PTR)(IDX*8), X4
	ADDPD  48(Y_PTR)(IDX*8), X5
	MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i
	MOVUPS X3, 16(DST_PTR)(IDX*8)
	MOVUPS X4, 32(DST_PTR)(IDX*8)
	MOVUPS X5, 48(DST_PTR)(IDX*8)
	ADDQ   $8, IDX // i += 8
	DECQ   LEN
	JNZ    loop     // } while --LEN > 0
	CMPQ   TAIL, $0 // if TAIL == 0 { return }
	JE     end

tail_start: // Reset loop registers
	MOVQ TAIL, LEN // Loop counter: LEN = TAIL
	SHRQ $1, LEN   // LEN = floor( TAIL / 2 )
	JZ   tail_one  // if LEN == 0 { goto tail_one }

tail_two: // do {
	MOVUPS (X_PTR)(IDX*8), X2   // X2 = x[i]
	MULPD  ALPHA, X2            // X2 *= a
	ADDPD  (Y_PTR)(IDX*8), X2   // X2 += y[i]
	MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2
	ADDQ   $2, IDX              // i += 2
	DECQ   LEN
	JNZ    tail_two // } while --LEN > 0
	ANDQ   $1, TAIL
	JZ     end // if TAIL == 0 { goto end }

tail_one:
	MOVSD (X_PTR)(IDX*8), X2   // X2 = x[i]
	MULSD ALPHA, X2            // X2 *= a
	ADDSD (Y_PTR)(IDX*8), X2   // X2 += y[i]
	MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2

end:
	RET

View File

@@ -0,0 +1,140 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define X_PTR SI
#define Y_PTR DX
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1
// func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64)
//
// dst[i] = alpha*x[i] + y[i] over min(len(x), len(y), len(dst)) elements.
// Same structure as AxpyUnitary: scalar head for alignment of y,
// 8x-unrolled packed loop, 2x/1x scalar tail.
TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0
	MOVQ    dst_base+0(FP), DST_PTR // DST_PTR := &dst
	MOVQ    x_base+32(FP), X_PTR    // X_PTR := &x
	MOVQ    y_base+56(FP), Y_PTR    // Y_PTR := &y
	MOVQ    x_len+40(FP), LEN       // LEN = min( len(x), len(y), len(dst) )
	CMPQ    y_len+64(FP), LEN
	CMOVQLE y_len+64(FP), LEN
	CMPQ    dst_len+8(FP), LEN
	CMOVQLE dst_len+8(FP), LEN
	CMPQ    LEN, $0
	JE      end // if LEN == 0 { return }
	XORQ    IDX, IDX           // IDX = 0
	MOVSD   alpha+24(FP), ALPHA
	SHUFPD  $0, ALPHA, ALPHA // ALPHA := { alpha, alpha }
	MOVQ    Y_PTR, TAIL      // Check memory alignment
	ANDQ    $15, TAIL        // TAIL = &y % 16
	JZ      no_trim          // if TAIL == 0 { goto no_trim }

	// Align on 16-byte boundary
	MOVSD (X_PTR), X2 // X2 := x[0]
	MULSD ALPHA, X2   // X2 *= a
	ADDSD (Y_PTR), X2 // X2 += y[0]
	MOVSD X2, (DST_PTR) // dst[0] = X2
	INCQ  IDX // i++
	DECQ  LEN // LEN--
	JZ    end // if LEN == 0 { return }

no_trim:
	MOVQ LEN, TAIL
	ANDQ $7, TAIL          // TAIL := n % 8
	SHRQ $3, LEN           // LEN = floor( n / 8 )
	JZ   tail_start        // if LEN == 0 { goto tail_start }
	MOVUPS ALPHA, ALPHA_2  // ALPHA_2 := ALPHA for pipelining

loop: // do {
	// dst[i] = alpha * x[i] + y[i] unrolled 8x.
	MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
	MOVUPS 16(X_PTR)(IDX*8), X3
	MOVUPS 32(X_PTR)(IDX*8), X4
	MOVUPS 48(X_PTR)(IDX*8), X5
	MULPD  ALPHA, X2 // X_i *= alpha
	MULPD  ALPHA_2, X3
	MULPD  ALPHA, X4
	MULPD  ALPHA_2, X5
	ADDPD  (Y_PTR)(IDX*8), X2 // X_i += y[i]
	ADDPD  16(Y_PTR)(IDX*8), X3
	ADDPD  32(Y_PTR)(IDX*8), X4
	ADDPD  48(Y_PTR)(IDX*8), X5
	MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i
	MOVUPS X3, 16(DST_PTR)(IDX*8)
	MOVUPS X4, 32(DST_PTR)(IDX*8)
	MOVUPS X5, 48(DST_PTR)(IDX*8)
	ADDQ   $8, IDX // i += 8
	DECQ   LEN
	JNZ    loop     // } while --LEN > 0
	CMPQ   TAIL, $0 // if TAIL == 0 { return }
	JE     end

tail_start: // Reset loop registers
	MOVQ TAIL, LEN // Loop counter: LEN = TAIL
	SHRQ $1, LEN   // LEN = floor( TAIL / 2 )
	JZ   tail_one  // if LEN == 0 { goto tail_one }

tail_two: // do {
	MOVUPS (X_PTR)(IDX*8), X2   // X2 = x[i]
	MULPD  ALPHA, X2            // X2 *= alpha
	ADDPD  (Y_PTR)(IDX*8), X2   // X2 += y[i]
	MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X2
	ADDQ   $2, IDX              // i += 2
	DECQ   LEN
	JNZ    tail_two // } while --LEN > 0
	ANDQ   $1, TAIL
	JZ     end // if TAIL == 0 { goto end }

tail_one:
	MOVSD (X_PTR)(IDX*8), X2   // X2 = x[i]
	MULSD ALPHA, X2            // X2 *= a
	ADDSD (Y_PTR)(IDX*8), X2   // X2 += y[i]
	MOVSD X2, (DST_PTR)(IDX*8) // dst[i] = X2

end:
	RET

View File

@@ -0,0 +1,71 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func CumProd(dst, s []float64) []float64
// Writes the running (cumulative) product of s into dst and returns dst.
TEXT ·CumProd(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DI // DI = &dst
MOVQ dst_len+8(FP), CX // CX = len(dst)
MOVQ s_base+24(FP), SI // SI = &s
CMPQ s_len+32(FP), CX // CX = min( CX, len(s) )
CMOVQLE s_len+32(FP), CX
MOVQ CX, ret_len+56(FP) // len(ret) = CX
CMPQ CX, $0 // if CX == 0 { return }
JE cp_end
XORQ AX, AX // i = 0
MOVSD (SI), X5 // p_prod = { s[0], s[0] }
SHUFPD $0, X5, X5
MOVSD X5, (DI) // dst[0] = s[0]
INCQ AX // ++i
DECQ CX // -- CX
JZ cp_end // if CX == 0 { return }
MOVQ CX, BX
ANDQ $3, BX // BX = CX % 4
SHRQ $2, CX // CX = floor( CX / 4 )
JZ cp_tail_start // if CX == 0 { goto cp_tail_start }
cp_loop: // Loop unrolled 4x do {
MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1]
MOVUPS 16(SI)(AX*8), X2
MOVAPS X0, X1 // X1 = X0
MOVAPS X2, X3
SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] }
SHUFPD $1, X3, X3
MULPD X0, X1 // X1 *= X0
MULPD X2, X3
SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] }
SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] }
SHUFPD $2, X3, X2
SHUFPD $3, X3, X3
MULPD X5, X0 // X0 *= p_prod
MULPD X1, X5 // p_prod *= X1
MULPD X5, X2
MOVUPS X0, (DI)(AX*8) // dst[i] = X0
MOVUPS X2, 16(DI)(AX*8)
MULPD X3, X5
ADDQ $4, AX // i += 4
LOOP cp_loop // } while --CX > 0
// if BX == 0 { return }
CMPQ BX, $0
JE cp_end
cp_tail_start: // Reset loop registers
MOVQ BX, CX // Loop counter: CX = BX
cp_tail: // do {
MULSD (SI)(AX*8), X5 // p_prod *= s[i]
MOVSD X5, (DI)(AX*8) // dst[i] = p_prod
INCQ AX // ++i
LOOP cp_tail // } while --CX > 0
cp_end:
MOVQ DI, ret_base+48(FP) // &ret = &dst
MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst)
MOVQ SI, ret_cap+64(FP)
RET

View File

@@ -0,0 +1,64 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func CumSum(dst, s []float64) []float64
// Writes the running (cumulative) sum of s into dst and returns dst.
TEXT ·CumSum(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DI // DI = &dst
MOVQ dst_len+8(FP), CX // CX = len(dst)
MOVQ s_base+24(FP), SI // SI = &s
CMPQ s_len+32(FP), CX // CX = min( CX, len(s) )
CMOVQLE s_len+32(FP), CX
MOVQ CX, ret_len+56(FP) // len(ret) = CX
CMPQ CX, $0 // if CX == 0 { return }
JE cs_end
XORQ AX, AX // i = 0
PXOR X5, X5 // p_sum = 0
MOVQ CX, BX
ANDQ $3, BX // BX = CX % 4
SHRQ $2, CX // CX = floor( CX / 4 )
JZ cs_tail_start // if CX == 0 { goto cs_tail_start }
cs_loop: // Loop unrolled 4x do {
MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1]
MOVUPS 16(SI)(AX*8), X2
MOVAPS X0, X1 // X1 = X0
MOVAPS X2, X3
SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] }
SHUFPD $1, X3, X3
ADDPD X0, X1 // X1 += X0
ADDPD X2, X3
SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] }
SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] }
SHUFPD $2, X3, X2
SHUFPD $3, X3, X3
ADDPD X5, X0 // X0 += p_sum
ADDPD X1, X5 // p_sum += X1
ADDPD X5, X2
MOVUPS X0, (DI)(AX*8) // dst[i] = X0
MOVUPS X2, 16(DI)(AX*8)
ADDPD X3, X5
ADDQ $4, AX // i += 4
LOOP cs_loop // } while --CX > 0
// if BX == 0 { return }
CMPQ BX, $0
JE cs_end
cs_tail_start: // Reset loop registers
MOVQ BX, CX // Loop counter: CX = BX
cs_tail: // do {
ADDSD (SI)(AX*8), X5 // p_sum += s[i]
MOVSD X5, (DI)(AX*8) // dst[i] = p_sum
INCQ AX // ++i
LOOP cs_tail // } while --CX > 0
cs_end:
MOVQ DI, ret_base+48(FP) // &ret = &dst
MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst)
MOVQ SI, ret_cap+64(FP)
RET

67
vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,67 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func Div(dst, s []float64)
// Computes dst[i] /= s[i] elementwise, in place.
TEXT ·Div(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DI // DI = &dst
MOVQ dst_len+8(FP), CX // CX = len(dst)
MOVQ s_base+24(FP), SI // SI = &s
CMPQ s_len+32(FP), CX // CX = min( CX, len(s) )
CMOVQLE s_len+32(FP), CX
CMPQ CX, $0 // if CX == 0 { return }
JE div_end
XORQ AX, AX // i = 0
MOVQ SI, BX
ANDQ $15, BX // BX = &s & 15
JZ div_no_trim // if BX == 0 { goto div_no_trim }
// Align on 16-byte boundary by peeling one scalar element
MOVSD (DI)(AX*8), X0 // X0 = dst[i]
DIVSD (SI)(AX*8), X0 // X0 /= s[i]
MOVSD X0, (DI)(AX*8) // dst[i] = X0
INCQ AX // ++i
DECQ CX // --CX
JZ div_end // if CX == 0 { return }
div_no_trim:
MOVQ CX, BX
ANDQ $7, BX // BX = len(dst) % 8
SHRQ $3, CX // CX = floor( len(dst) / 8 )
JZ div_tail_start // if CX == 0 { goto div_tail_start }
div_loop: // Loop unrolled 8x do {
MOVUPS (DI)(AX*8), X0 // X0 = dst[i:i+1]
MOVUPS 16(DI)(AX*8), X1
MOVUPS 32(DI)(AX*8), X2
MOVUPS 48(DI)(AX*8), X3
DIVPD (SI)(AX*8), X0 // X0 /= s[i:i+1]
DIVPD 16(SI)(AX*8), X1
DIVPD 32(SI)(AX*8), X2
DIVPD 48(SI)(AX*8), X3
MOVUPS X0, (DI)(AX*8) // dst[i] = X0
MOVUPS X1, 16(DI)(AX*8)
MOVUPS X2, 32(DI)(AX*8)
MOVUPS X3, 48(DI)(AX*8)
ADDQ $8, AX // i += 8
LOOP div_loop // } while --CX > 0
CMPQ BX, $0 // if BX == 0 { return }
JE div_end
div_tail_start: // Reset loop registers
MOVQ BX, CX // Loop counter: CX = BX
div_tail: // do {
MOVSD (DI)(AX*8), X0 // X0 = dst[i]
DIVSD (SI)(AX*8), X0 // X0 /= s[i]
MOVSD X0, (DI)(AX*8) // dst[i] = X0
INCQ AX // ++i
LOOP div_tail // } while --CX > 0
div_end:
RET

View File

@@ -0,0 +1,73 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func DivTo(dst, x, y []float64) []float64
// Computes dst[i] = x[i] / y[i] elementwise and returns dst.
TEXT ·DivTo(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DI // DI = &dst
MOVQ dst_len+8(FP), CX // CX = len(dst)
MOVQ x_base+24(FP), SI // SI = &x
MOVQ y_base+48(FP), DX // DX = &y
CMPQ x_len+32(FP), CX // CX = min( len(dst), len(x), len(y) )
CMOVQLE x_len+32(FP), CX
CMPQ y_len+56(FP), CX
CMOVQLE y_len+56(FP), CX
MOVQ CX, ret_len+80(FP) // len(ret) = CX
CMPQ CX, $0 // if CX == 0 { return }
JE div_end
XORQ AX, AX // i = 0
MOVQ DX, BX
ANDQ $15, BX // BX = &y & 0xF
JZ div_no_trim // if BX == 0 { goto div_no_trim }
// Align on 16-byte boundary by peeling one scalar element
MOVSD (SI)(AX*8), X0 // X0 = x[i]
DIVSD (DX)(AX*8), X0 // X0 /= y[i]
MOVSD X0, (DI)(AX*8) // dst[i] = X0
INCQ AX // ++i
DECQ CX // --CX
JZ div_end // if CX == 0 { return }
div_no_trim:
MOVQ CX, BX
ANDQ $7, BX // BX = len(dst) % 8
SHRQ $3, CX // CX = floor( len(dst) / 8 )
JZ div_tail_start // if CX == 0 { goto div_tail_start }
div_loop: // Loop unrolled 8x do {
MOVUPS (SI)(AX*8), X0 // X0 = x[i:i+1]
MOVUPS 16(SI)(AX*8), X1
MOVUPS 32(SI)(AX*8), X2
MOVUPS 48(SI)(AX*8), X3
DIVPD (DX)(AX*8), X0 // X0 /= y[i:i+1]
DIVPD 16(DX)(AX*8), X1
DIVPD 32(DX)(AX*8), X2
DIVPD 48(DX)(AX*8), X3
MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X0
MOVUPS X1, 16(DI)(AX*8)
MOVUPS X2, 32(DI)(AX*8)
MOVUPS X3, 48(DI)(AX*8)
ADDQ $8, AX // i += 8
LOOP div_loop // } while --CX > 0
CMPQ BX, $0 // if BX == 0 { return }
JE div_end
div_tail_start: // Reset loop registers
MOVQ BX, CX // Loop counter: CX = BX
div_tail: // do {
MOVSD (SI)(AX*8), X0 // X0 = x[i]
DIVSD (DX)(AX*8), X0 // X0 /= y[i]
MOVSD X0, (DI)(AX*8) // dst[i] = X0
INCQ AX // ++i
LOOP div_tail // } while --CX > 0
div_end:
MOVQ DI, ret_base+72(FP) // &ret = &dst
MOVQ dst_cap+16(FP), DI // cap(ret) = cap(dst)
MOVQ DI, ret_cap+88(FP)
RET

6
vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go generated vendored Normal file
View File

@@ -0,0 +1,6 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package f64 provides float64 vector primitives.
package f64 // import "gonum.org/v1/gonum/internal/asm/f64"

35
vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go generated vendored Normal file
View File

@@ -0,0 +1,35 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package f64
// DotUnitary is
//  for i, v := range x {
//  	sum += y[i] * v
//  }
//  return sum
// It is the pure-Go fallback for the assembly implementation and assumes
// len(y) >= len(x).
func DotUnitary(x, y []float64) (sum float64) {
	for i := range x {
		sum += x[i] * y[i]
	}
	return sum
}
// DotInc is
//  for i := 0; i < int(n); i++ {
//  	sum += y[iy] * x[ix]
//  	ix += incX
//  	iy += incY
//  }
//  return sum
// It is the pure-Go fallback for the assembly implementation; ix and iy are
// the starting offsets into x and y, and incX/incY the per-element strides.
func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) {
	xi, yi := ix, iy
	for rem := n; rem > 0; rem-- {
		sum += x[xi] * y[yi]
		xi += incX
		yi += incY
	}
	return sum
}

145
vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,145 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// func DotUnitary(x, y []float64) (sum float64)
// This function assumes len(y) >= len(x).
TEXT ·DotUnitary(SB), NOSPLIT, $0
MOVQ x+0(FP), R8
MOVQ x_len+8(FP), DI // n = len(x)
MOVQ y+24(FP), R9
MOVSD $(0.0), X7 // sum = 0 (first partial-sum register)
MOVSD $(0.0), X8 // sum = 0 (second partial sum, for pipelining)
MOVQ $0, SI // i = 0
SUBQ $4, DI // n -= 4
JL tail_uni // if n < 0 goto tail_uni
loop_uni:
// sum += x[i] * y[i] unrolled 4x.
MOVUPD 0(R8)(SI*8), X0
MOVUPD 0(R9)(SI*8), X1
MOVUPD 16(R8)(SI*8), X2
MOVUPD 16(R9)(SI*8), X3
MULPD X1, X0
MULPD X3, X2
ADDPD X0, X7
ADDPD X2, X8
ADDQ $4, SI // i += 4
SUBQ $4, DI // n -= 4
JGE loop_uni // if n >= 0 goto loop_uni
tail_uni:
ADDQ $4, DI // n += 4
JLE end_uni // if n <= 0 goto end_uni
onemore_uni:
// sum += x[i] * y[i] for the remaining 1-3 elements.
MOVSD 0(R8)(SI*8), X0
MOVSD 0(R9)(SI*8), X1
MULSD X1, X0
ADDSD X0, X7
ADDQ $1, SI // i++
SUBQ $1, DI // n--
JNZ onemore_uni // if n != 0 goto onemore_uni
end_uni:
// Add the four sums together.
ADDPD X8, X7
MOVSD X7, X0
UNPCKHPD X7, X7
ADDSD X0, X7
MOVSD X7, sum+48(FP) // Return final sum.
RET
// func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64)
TEXT ·DotInc(SB), NOSPLIT, $0
MOVQ x+0(FP), R8
MOVQ y+24(FP), R9
MOVQ n+48(FP), CX
MOVQ incX+56(FP), R11
MOVQ incY+64(FP), R12
MOVQ ix+72(FP), R13
MOVQ iy+80(FP), R14
MOVSD $(0.0), X7 // sum = 0
LEAQ (R8)(R13*8), SI // p = &x[ix]
LEAQ (R9)(R14*8), DI // q = &y[iy]
SHLQ $3, R11 // incX *= sizeof(float64)
SHLQ $3, R12 // incY *= sizeof(float64)
SUBQ $2, CX // n -= 2
JL tail_inc // if n < 0 goto tail_inc
loop_inc:
// sum += *p * *q unrolled 2x.
MOVHPD (SI), X0
MOVHPD (DI), X1
ADDQ R11, SI // p += incX
ADDQ R12, DI // q += incY
MOVLPD (SI), X0
MOVLPD (DI), X1
ADDQ R11, SI // p += incX
ADDQ R12, DI // q += incY
MULPD X1, X0
ADDPD X0, X7
SUBQ $2, CX // n -= 2
JGE loop_inc // if n >= 0 goto loop_inc
tail_inc:
ADDQ $2, CX // n += 2
JLE end_inc // if n <= 0 goto end_inc
// sum += *p * *q for the last iteration if n is odd.
MOVSD (SI), X0
MULSD (DI), X0
ADDSD X0, X7
end_inc:
// Add the two sums together.
MOVSD X7, X0
UNPCKHPD X7, X7
ADDSD X0, X7
MOVSD X7, sum+88(FP) // Return final sum.
RET

22
vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
package f64
// Ger performs the rank-one operation
//  A += alpha * x * y^T
// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar.
// It is implemented in assembly (ge_amd64.s).
func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr)

// GemvN computes
//  y = alpha * A * x + beta * y
// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars.
// It is implemented in assembly (gemvN_amd64.s).
func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr)

// GemvT computes
//  y = alpha * A^T * x + beta * y
// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars.
// It is implemented in assembly (gemvT_amd64.s).
func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr)

118
vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go generated vendored Normal file
View File

@@ -0,0 +1,118 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package f64
// Ger performs the rank-one operation
//  A += alpha * x * y^T
// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar.
func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) {
	if incX == 1 && incY == 1 {
		// Fast path: unit strides let every row update use the unitary kernel.
		x = x[:m]
		y = y[:n]
		for i := range x {
			row := uintptr(i) * lda
			AxpyUnitary(alpha*x[i], y, a[row:row+n])
		}
		return
	}

	// A negative stride walks its vector from the far end.
	var kx, ky uintptr
	if int(incX) < 0 {
		kx = uintptr(-int(m-1) * int(incX))
	}
	if int(incY) < 0 {
		ky = uintptr(-int(n-1) * int(incY))
	}
	ix := kx
	for i := uintptr(0); i < m; i++ {
		AxpyInc(alpha*x[ix], y, a[i*lda:i*lda+n], n, incY, 1, ky, 0)
		ix += incX
	}
}
// GemvN computes
//  y = alpha * A * x + beta * y
// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars.
func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) {
	var kx, ky, i uintptr
	// A negative stride walks its vector from the far end.
	if int(incX) < 0 {
		kx = uintptr(-int(n-1) * int(incX))
	}
	if int(incY) < 0 {
		ky = uintptr(-int(m-1) * int(incY))
	}

	if incX == 1 && incY == 1 {
		// Fast path for unit strides.
		if beta == 0 {
			// beta == 0: overwrite y without reading it.
			for i = 0; i < m; i++ {
				y[i] = alpha * DotUnitary(a[lda*i:lda*i+n], x)
			}
			return
		}
		for i = 0; i < m; i++ {
			y[i] = y[i]*beta + alpha*DotUnitary(a[lda*i:lda*i+n], x)
		}
		return
	}
	iy := ky
	if beta == 0 {
		// beta == 0: overwrite y without reading it.
		for i = 0; i < m; i++ {
			y[iy] = alpha * DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0)
			iy += incY
		}
		return
	}
	for i = 0; i < m; i++ {
		y[iy] = y[iy]*beta + alpha*DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0)
		iy += incY
	}
}
// GemvT computes
//  y = alpha * A^T * x + beta * y
// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars.
func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) {
	var kx, ky, i uintptr
	// A negative stride walks its vector from the far end.
	if int(incX) < 0 {
		kx = uintptr(-int(m-1) * int(incX))
	}
	if int(incY) < 0 {
		ky = uintptr(-int(n-1) * int(incY))
	}
	// Scale y by beta up front; the accumulation loops below then only add.
	switch {
	case beta == 0: // beta == 0 is special-cased to memclear
		if incY == 1 {
			for i := range y {
				y[i] = 0
			}
		} else {
			iy := ky
			for i := 0; i < int(n); i++ {
				y[iy] = 0
				iy += incY
			}
		}
	case int(incY) < 0:
		// ScalInc expects a positive stride; ky already points at the far end.
		ScalInc(beta, y, n, uintptr(int(-incY)))
	case incY == 1:
		ScalUnitary(beta, y[:n])
	default:
		ScalInc(beta, y, n, incY)
	}

	if incX == 1 && incY == 1 {
		// Fast path for unit strides.
		for i = 0; i < m; i++ {
			AxpyUnitaryTo(y, alpha*x[i], a[lda*i:lda*i+n], y)
		}
		return
	}
	ix := kx
	for i = 0; i < m; i++ {
		AxpyInc(alpha*x[ix], a[lda*i:lda*i+n], y, n, 1, incY, 0, ky)
		ix += incX
	}
}

View File

@@ -0,0 +1,685 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// Register aliases and blocked kernels for GemvN: y = alpha * A * x + beta * y.
// Rows of A are processed in blocks of 4/2/1; within a row, columns in blocks of 4/2/1.
#define SIZE 8
#define M_DIM m+0(FP)
#define M CX
#define N_DIM n+8(FP)
#define N BX
#define TMP1 R14
#define TMP2 R15
#define X_PTR SI
#define X x_base+56(FP)
#define INC_X R8
#define INC3_X R9
#define Y_PTR DX
#define Y y_base+96(FP)
#define INC_Y R10
#define INC3_Y R11
#define A_ROW AX
#define A_PTR DI
#define LDA R12
#define LDA3 R13
#define ALPHA X15
#define BETA X14
// Zero the row accumulators (one X register per row in the block).
#define INIT4 \
XORPS X0, X0 \
XORPS X1, X1 \
XORPS X2, X2 \
XORPS X3, X3
#define INIT2 \
XORPS X0, X0 \
XORPS X1, X1
#define INIT1 \
XORPS X0, X0
// Load 4 (or 2) contiguous elements of x into X12/X13.
#define KERNEL_LOAD4 \
MOVUPS (X_PTR), X12 \
MOVUPS 2*SIZE(X_PTR), X13
#define KERNEL_LOAD2 \
MOVUPS (X_PTR), X12
// Strided variants: gather x elements using INC_X/INC3_X.
#define KERNEL_LOAD4_INC \
MOVSD (X_PTR), X12 \
MOVHPD (X_PTR)(INC_X*1), X12 \
MOVSD (X_PTR)(INC_X*2), X13 \
MOVHPD (X_PTR)(INC3_X*1), X13
#define KERNEL_LOAD2_INC \
MOVSD (X_PTR), X12 \
MOVHPD (X_PTR)(INC_X*1), X12
// Multiply-accumulate a 4-row by 4-column tile of A against x.
#define KERNEL_4x4 \
MOVUPS (A_PTR), X4 \
MOVUPS 2*SIZE(A_PTR), X5 \
MOVUPS (A_PTR)(LDA*1), X6 \
MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \
MOVUPS (A_PTR)(LDA*2), X8 \
MOVUPS 2*SIZE(A_PTR)(LDA*2), X9 \
MOVUPS (A_PTR)(LDA3*1), X10 \
MOVUPS 2*SIZE(A_PTR)(LDA3*1), X11 \
MULPD X12, X4 \
MULPD X13, X5 \
MULPD X12, X6 \
MULPD X13, X7 \
MULPD X12, X8 \
MULPD X13, X9 \
MULPD X12, X10 \
MULPD X13, X11 \
ADDPD X4, X0 \
ADDPD X5, X0 \
ADDPD X6, X1 \
ADDPD X7, X1 \
ADDPD X8, X2 \
ADDPD X9, X2 \
ADDPD X10, X3 \
ADDPD X11, X3 \
ADDQ $4*SIZE, A_PTR
#define KERNEL_4x2 \
MOVUPS (A_PTR), X4 \
MOVUPS (A_PTR)(LDA*1), X5 \
MOVUPS (A_PTR)(LDA*2), X6 \
MOVUPS (A_PTR)(LDA3*1), X7 \
MULPD X12, X4 \
MULPD X12, X5 \
MULPD X12, X6 \
MULPD X12, X7 \
ADDPD X4, X0 \
ADDPD X5, X1 \
ADDPD X6, X2 \
ADDPD X7, X3 \
ADDQ $2*SIZE, A_PTR
#define KERNEL_4x1 \
MOVDDUP (X_PTR), X12 \
MOVSD (A_PTR), X4 \
MOVHPD (A_PTR)(LDA*1), X4 \
MOVSD (A_PTR)(LDA*2), X5 \
MOVHPD (A_PTR)(LDA3*1), X5 \
MULPD X12, X4 \
MULPD X12, X5 \
ADDPD X4, X0 \
ADDPD X5, X2 \
ADDQ $SIZE, A_PTR
// y[i:i+4] = alpha*acc + beta*y[i:i+4] for a contiguous y.
#define STORE4 \
MOVUPS (Y_PTR), X4 \
MOVUPS 2*SIZE(Y_PTR), X5 \
MULPD ALPHA, X0 \
MULPD ALPHA, X2 \
MULPD BETA, X4 \
MULPD BETA, X5 \
ADDPD X0, X4 \
ADDPD X2, X5 \
MOVUPS X4, (Y_PTR) \
MOVUPS X5, 2*SIZE(Y_PTR)
#define STORE4_INC \
MOVSD (Y_PTR), X4 \
MOVHPD (Y_PTR)(INC_Y*1), X4 \
MOVSD (Y_PTR)(INC_Y*2), X5 \
MOVHPD (Y_PTR)(INC3_Y*1), X5 \
MULPD ALPHA, X0 \
MULPD ALPHA, X2 \
MULPD BETA, X4 \
MULPD BETA, X5 \
ADDPD X0, X4 \
ADDPD X2, X5 \
MOVLPD X4, (Y_PTR) \
MOVHPD X4, (Y_PTR)(INC_Y*1) \
MOVLPD X5, (Y_PTR)(INC_Y*2) \
MOVHPD X5, (Y_PTR)(INC3_Y*1)
#define KERNEL_2x4 \
MOVUPS (A_PTR), X8 \
MOVUPS 2*SIZE(A_PTR), X9 \
MOVUPS (A_PTR)(LDA*1), X10 \
MOVUPS 2*SIZE(A_PTR)(LDA*1), X11 \
MULPD X12, X8 \
MULPD X13, X9 \
MULPD X12, X10 \
MULPD X13, X11 \
ADDPD X8, X0 \
ADDPD X10, X1 \
ADDPD X9, X0 \
ADDPD X11, X1 \
ADDQ $4*SIZE, A_PTR
#define KERNEL_2x2 \
MOVUPS (A_PTR), X8 \
MOVUPS (A_PTR)(LDA*1), X9 \
MULPD X12, X8 \
MULPD X12, X9 \
ADDPD X8, X0 \
ADDPD X9, X1 \
ADDQ $2*SIZE, A_PTR
#define KERNEL_2x1 \
MOVDDUP (X_PTR), X12 \
MOVSD (A_PTR), X8 \
MOVHPD (A_PTR)(LDA*1), X8 \
MULPD X12, X8 \
ADDPD X8, X0 \
ADDQ $SIZE, A_PTR
#define STORE2 \
MOVUPS (Y_PTR), X4 \
MULPD ALPHA, X0 \
MULPD BETA, X4 \
ADDPD X0, X4 \
MOVUPS X4, (Y_PTR)
#define STORE2_INC \
MOVSD (Y_PTR), X4 \
MOVHPD (Y_PTR)(INC_Y*1), X4 \
MULPD ALPHA, X0 \
MULPD BETA, X4 \
ADDPD X0, X4 \
MOVSD X4, (Y_PTR) \
MOVHPD X4, (Y_PTR)(INC_Y*1)
#define KERNEL_1x4 \
MOVUPS (A_PTR), X8 \
MOVUPS 2*SIZE(A_PTR), X9 \
MULPD X12, X8 \
MULPD X13, X9 \
ADDPD X8, X0 \
ADDPD X9, X0 \
ADDQ $4*SIZE, A_PTR
#define KERNEL_1x2 \
MOVUPS (A_PTR), X8 \
MULPD X12, X8 \
ADDPD X8, X0 \
ADDQ $2*SIZE, A_PTR
#define KERNEL_1x1 \
MOVSD (X_PTR), X12 \
MOVSD (A_PTR), X8 \
MULSD X12, X8 \
ADDSD X8, X0 \
ADDQ $SIZE, A_PTR
#define STORE1 \
HADDPD X0, X0 \
MOVSD (Y_PTR), X4 \
MULSD ALPHA, X0 \
MULSD BETA, X4 \
ADDSD X0, X4 \
MOVSD X4, (Y_PTR)
// func GemvN(m, n int,
//  alpha float64,
//  a []float64, lda int,
//  x []float64, incX int,
//  beta float64,
//  y []float64, incY int)
TEXT ·GemvN(SB), NOSPLIT, $32-128
MOVQ M_DIM, M
MOVQ N_DIM, N
CMPQ M, $0
JE end
CMPQ N, $0
JE end
MOVDDUP alpha+16(FP), ALPHA
MOVDDUP beta+88(FP), BETA
MOVQ x_base+56(FP), X_PTR
MOVQ y_base+96(FP), Y_PTR
MOVQ a_base+24(FP), A_ROW
MOVQ incY+120(FP), INC_Y
MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64)
SHLQ $3, LDA
LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3
MOVQ A_ROW, A_PTR
// For a negative incY, start y at its far end.
XORQ TMP2, TMP2
MOVQ M, TMP1
SUBQ $1, TMP1
IMULQ INC_Y, TMP1
NEGQ TMP1
CMPQ INC_Y, $0
CMOVQLT TMP1, TMP2
LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR
MOVQ Y_PTR, Y
SHLQ $3, INC_Y // INC_Y = incY * sizeof(float64)
LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3
MOVSD $0.0, X0
COMISD BETA, X0
JNE gemv_start // if beta != 0 { goto gemv_start }
gemv_clear: // beta == 0 is special cased to clear memory (no nan handling)
XORPS X0, X0
XORPS X1, X1
XORPS X2, X2
XORPS X3, X3
CMPQ incY+120(FP), $1 // Check for dense vector X (fast-path)
JNE inc_clear
SHRQ $3, M
JZ clear4
clear8:
MOVUPS X0, (Y_PTR)
MOVUPS X1, 16(Y_PTR)
MOVUPS X2, 32(Y_PTR)
MOVUPS X3, 48(Y_PTR)
ADDQ $8*SIZE, Y_PTR
DECQ M
JNZ clear8
clear4:
TESTQ $4, M_DIM
JZ clear2
MOVUPS X0, (Y_PTR)
MOVUPS X1, 16(Y_PTR)
ADDQ $4*SIZE, Y_PTR
clear2:
TESTQ $2, M_DIM
JZ clear1
MOVUPS X0, (Y_PTR)
ADDQ $2*SIZE, Y_PTR
clear1:
TESTQ $1, M_DIM
JZ prep_end
MOVSD X0, (Y_PTR)
JMP prep_end
inc_clear:
SHRQ $2, M
JZ inc_clear2
inc_clear4:
MOVSD X0, (Y_PTR)
MOVSD X1, (Y_PTR)(INC_Y*1)
MOVSD X2, (Y_PTR)(INC_Y*2)
MOVSD X3, (Y_PTR)(INC3_Y*1)
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ M
JNZ inc_clear4
inc_clear2:
TESTQ $2, M_DIM
JZ inc_clear1
MOVSD X0, (Y_PTR)
MOVSD X1, (Y_PTR)(INC_Y*1)
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_clear1:
TESTQ $1, M_DIM
JZ prep_end
MOVSD X0, (Y_PTR)
prep_end:
MOVQ Y, Y_PTR
MOVQ M_DIM, M
// Main loops: rows in blocks of 4, then 2, then 1.
gemv_start:
CMPQ incX+80(FP), $1 // Check for dense vector X (fast-path)
JNE inc
SHRQ $2, M
JZ r2
r4:
// LOAD 4
INIT4
MOVQ N_DIM, N
SHRQ $2, N
JZ r4c2
r4c4:
// 4x4 KERNEL
KERNEL_LOAD4
KERNEL_4x4
ADDQ $4*SIZE, X_PTR
DECQ N
JNZ r4c4
r4c2:
TESTQ $2, N_DIM
JZ r4c1
// 4x2 KERNEL
KERNEL_LOAD2
KERNEL_4x2
ADDQ $2*SIZE, X_PTR
r4c1:
HADDPD X1, X0
HADDPD X3, X2
TESTQ $1, N_DIM
JZ r4end
// 4x1 KERNEL
KERNEL_4x1
ADDQ $SIZE, X_PTR
r4end:
CMPQ INC_Y, $SIZE
JNZ r4st_inc
STORE4
ADDQ $4*SIZE, Y_PTR
JMP r4inc
r4st_inc:
STORE4_INC
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
r4inc:
MOVQ X, X_PTR
LEAQ (A_ROW)(LDA*4), A_ROW
MOVQ A_ROW, A_PTR
DECQ M
JNZ r4
r2:
TESTQ $2, M_DIM
JZ r1
// LOAD 2
INIT2
MOVQ N_DIM, N
SHRQ $2, N
JZ r2c2
r2c4:
// 2x4 KERNEL
KERNEL_LOAD4
KERNEL_2x4
ADDQ $4*SIZE, X_PTR
DECQ N
JNZ r2c4
r2c2:
TESTQ $2, N_DIM
JZ r2c1
// 2x2 KERNEL
KERNEL_LOAD2
KERNEL_2x2
ADDQ $2*SIZE, X_PTR
r2c1:
HADDPD X1, X0
TESTQ $1, N_DIM
JZ r2end
// 2x1 KERNEL
KERNEL_2x1
ADDQ $SIZE, X_PTR
r2end:
CMPQ INC_Y, $SIZE
JNE r2st_inc
STORE2
ADDQ $2*SIZE, Y_PTR
JMP r2inc
r2st_inc:
STORE2_INC
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
r2inc:
MOVQ X, X_PTR
LEAQ (A_ROW)(LDA*2), A_ROW
MOVQ A_ROW, A_PTR
r1:
TESTQ $1, M_DIM
JZ end
// LOAD 1
INIT1
MOVQ N_DIM, N
SHRQ $2, N
JZ r1c2
r1c4:
// 1x4 KERNEL
KERNEL_LOAD4
KERNEL_1x4
ADDQ $4*SIZE, X_PTR
DECQ N
JNZ r1c4
r1c2:
TESTQ $2, N_DIM
JZ r1c1
// 1x2 KERNEL
KERNEL_LOAD2
KERNEL_1x2
ADDQ $2*SIZE, X_PTR
r1c1:
TESTQ $1, N_DIM
JZ r1end
// 1x1 KERNEL
KERNEL_1x1
r1end:
STORE1
end:
RET
inc: // Algorithm for incX != 1 ( split loads in kernel )
MOVQ incX+80(FP), INC_X // INC_X = incX
// For a negative incX, start x at its far end.
XORQ TMP2, TMP2 // TMP2 = 0
MOVQ N, TMP1 // TMP1 = N
SUBQ $1, TMP1 // TMP1 -= 1
NEGQ TMP1 // TMP1 = -TMP1
IMULQ INC_X, TMP1 // TMP1 *= INC_X
CMPQ INC_X, $0 // if INC_X < 0 { TMP2 = TMP1 }
CMOVQLT TMP1, TMP2
LEAQ (X_PTR)(TMP2*SIZE), X_PTR // X_PTR = X_PTR[TMP2]
MOVQ X_PTR, X // X = X_PTR
SHLQ $3, INC_X
LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3
SHRQ $2, M
JZ inc_r2
inc_r4:
// LOAD 4
INIT4
MOVQ N_DIM, N
SHRQ $2, N
JZ inc_r4c2
inc_r4c4:
// 4x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_4x4
LEAQ (X_PTR)(INC_X*4), X_PTR
DECQ N
JNZ inc_r4c4
inc_r4c2:
TESTQ $2, N_DIM
JZ inc_r4c1
// 4x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_4x2
LEAQ (X_PTR)(INC_X*2), X_PTR
inc_r4c1:
HADDPD X1, X0
HADDPD X3, X2
TESTQ $1, N_DIM
JZ inc_r4end
// 4x1 KERNEL
KERNEL_4x1
ADDQ INC_X, X_PTR
inc_r4end:
CMPQ INC_Y, $SIZE
JNE inc_r4st_inc
STORE4
ADDQ $4*SIZE, Y_PTR
JMP inc_r4inc
inc_r4st_inc:
STORE4_INC
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
inc_r4inc:
MOVQ X, X_PTR
LEAQ (A_ROW)(LDA*4), A_ROW
MOVQ A_ROW, A_PTR
DECQ M
JNZ inc_r4
inc_r2:
TESTQ $2, M_DIM
JZ inc_r1
// LOAD 2
INIT2
MOVQ N_DIM, N
SHRQ $2, N
JZ inc_r2c2
inc_r2c4:
// 2x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_2x4
LEAQ (X_PTR)(INC_X*4), X_PTR
DECQ N
JNZ inc_r2c4
inc_r2c2:
TESTQ $2, N_DIM
JZ inc_r2c1
// 2x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_2x2
LEAQ (X_PTR)(INC_X*2), X_PTR
inc_r2c1:
HADDPD X1, X0
TESTQ $1, N_DIM
JZ inc_r2end
// 2x1 KERNEL
KERNEL_2x1
ADDQ INC_X, X_PTR
inc_r2end:
CMPQ INC_Y, $SIZE
JNE inc_r2st_inc
STORE2
ADDQ $2*SIZE, Y_PTR
JMP inc_r2inc
inc_r2st_inc:
STORE2_INC
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_r2inc:
MOVQ X, X_PTR
LEAQ (A_ROW)(LDA*2), A_ROW
MOVQ A_ROW, A_PTR
inc_r1:
TESTQ $1, M_DIM
JZ inc_end
// LOAD 1
INIT1
MOVQ N_DIM, N
SHRQ $2, N
JZ inc_r1c2
inc_r1c4:
// 1x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_1x4
LEAQ (X_PTR)(INC_X*4), X_PTR
DECQ N
JNZ inc_r1c4
inc_r1c2:
TESTQ $2, N_DIM
JZ inc_r1c1
// 1x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_1x2
LEAQ (X_PTR)(INC_X*2), X_PTR
inc_r1c1:
TESTQ $1, N_DIM
JZ inc_r1end
// 1x1 KERNEL
KERNEL_1x1
inc_r1end:
STORE1
inc_end:
RET

View File

@@ -0,0 +1,745 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define SIZE 8
#define M_DIM n+8(FP)
#define M CX
#define N_DIM m+0(FP)
#define N BX
#define TMP1 R14
#define TMP2 R15
#define X_PTR SI
#define X x_base+56(FP)
#define Y_PTR DX
#define Y y_base+96(FP)
#define A_ROW AX
#define A_PTR DI
#define INC_X R8
#define INC3_X R9
#define INC_Y R10
#define INC3_Y R11
#define LDA R12
#define LDA3 R13
#define ALPHA X15
#define BETA X14
#define INIT4 \
MOVDDUP (X_PTR), X8 \
MOVDDUP (X_PTR)(INC_X*1), X9 \
MOVDDUP (X_PTR)(INC_X*2), X10 \
MOVDDUP (X_PTR)(INC3_X*1), X11 \
MULPD ALPHA, X8 \
MULPD ALPHA, X9 \
MULPD ALPHA, X10 \
MULPD ALPHA, X11
#define INIT2 \
MOVDDUP (X_PTR), X8 \
MOVDDUP (X_PTR)(INC_X*1), X9 \
MULPD ALPHA, X8 \
MULPD ALPHA, X9
#define INIT1 \
MOVDDUP (X_PTR), X8 \
MULPD ALPHA, X8
#define KERNEL_LOAD4 \
MOVUPS (Y_PTR), X0 \
MOVUPS 2*SIZE(Y_PTR), X1
#define KERNEL_LOAD2 \
MOVUPS (Y_PTR), X0
#define KERNEL_LOAD4_INC \
MOVSD (Y_PTR), X0 \
MOVHPD (Y_PTR)(INC_Y*1), X0 \
MOVSD (Y_PTR)(INC_Y*2), X1 \
MOVHPD (Y_PTR)(INC3_Y*1), X1
#define KERNEL_LOAD2_INC \
MOVSD (Y_PTR), X0 \
MOVHPD (Y_PTR)(INC_Y*1), X0
#define KERNEL_4x4 \
MOVUPS (A_PTR), X4 \
MOVUPS 2*SIZE(A_PTR), X5 \
MOVUPS (A_PTR)(LDA*1), X6 \
MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \
MULPD X8, X4 \
MULPD X8, X5 \
MULPD X9, X6 \
MULPD X9, X7 \
ADDPD X4, X0 \
ADDPD X5, X1 \
ADDPD X6, X0 \
ADDPD X7, X1 \
MOVUPS (A_PTR)(LDA*2), X4 \
MOVUPS 2*SIZE(A_PTR)(LDA*2), X5 \
MOVUPS (A_PTR)(LDA3*1), X6 \
MOVUPS 2*SIZE(A_PTR)(LDA3*1), X7 \
MULPD X10, X4 \
MULPD X10, X5 \
MULPD X11, X6 \
MULPD X11, X7 \
ADDPD X4, X0 \
ADDPD X5, X1 \
ADDPD X6, X0 \
ADDPD X7, X1 \
ADDQ $4*SIZE, A_PTR
#define KERNEL_4x2 \
MOVUPS (A_PTR), X4 \
MOVUPS 2*SIZE(A_PTR), X5 \
MOVUPS (A_PTR)(LDA*1), X6 \
MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \
MULPD X8, X4 \
MULPD X8, X5 \
MULPD X9, X6 \
MULPD X9, X7 \
ADDPD X4, X0 \
ADDPD X5, X1 \
ADDPD X6, X0 \
ADDPD X7, X1 \
ADDQ $4*SIZE, A_PTR
#define KERNEL_4x1 \
MOVUPS (A_PTR), X4 \
MOVUPS 2*SIZE(A_PTR), X5 \
MULPD X8, X4 \
MULPD X8, X5 \
ADDPD X4, X0 \
ADDPD X5, X1 \
ADDQ $4*SIZE, A_PTR
#define STORE4 \
MOVUPS X0, (Y_PTR) \
MOVUPS X1, 2*SIZE(Y_PTR)
#define STORE4_INC \
MOVLPD X0, (Y_PTR) \
MOVHPD X0, (Y_PTR)(INC_Y*1) \
MOVLPD X1, (Y_PTR)(INC_Y*2) \
MOVHPD X1, (Y_PTR)(INC3_Y*1)
#define KERNEL_2x4 \
MOVUPS (A_PTR), X4 \
MOVUPS (A_PTR)(LDA*1), X5 \
MOVUPS (A_PTR)(LDA*2), X6 \
MOVUPS (A_PTR)(LDA3*1), X7 \
MULPD X8, X4 \
MULPD X9, X5 \
MULPD X10, X6 \
MULPD X11, X7 \
ADDPD X4, X0 \
ADDPD X5, X0 \
ADDPD X6, X0 \
ADDPD X7, X0 \
ADDQ $2*SIZE, A_PTR
#define KERNEL_2x2 \
MOVUPS (A_PTR), X4 \
MOVUPS (A_PTR)(LDA*1), X5 \
MULPD X8, X4 \
MULPD X9, X5 \
ADDPD X4, X0 \
ADDPD X5, X0 \
ADDQ $2*SIZE, A_PTR
#define KERNEL_2x1 \
MOVUPS (A_PTR), X4 \
MULPD X8, X4 \
ADDPD X4, X0 \
ADDQ $2*SIZE, A_PTR
#define STORE2 \
MOVUPS X0, (Y_PTR)
#define STORE2_INC \
MOVLPD X0, (Y_PTR) \
MOVHPD X0, (Y_PTR)(INC_Y*1)
#define KERNEL_1x4 \
MOVSD (Y_PTR), X0 \
MOVSD (A_PTR), X4 \
MOVSD (A_PTR)(LDA*1), X5 \
MOVSD (A_PTR)(LDA*2), X6 \
MOVSD (A_PTR)(LDA3*1), X7 \
MULSD X8, X4 \
MULSD X9, X5 \
MULSD X10, X6 \
MULSD X11, X7 \
ADDSD X4, X0 \
ADDSD X5, X0 \
ADDSD X6, X0 \
ADDSD X7, X0 \
MOVSD X0, (Y_PTR) \
ADDQ $SIZE, A_PTR
#define KERNEL_1x2 \
MOVSD (Y_PTR), X0 \
MOVSD (A_PTR), X4 \
MOVSD (A_PTR)(LDA*1), X5 \
MULSD X8, X4 \
MULSD X9, X5 \
ADDSD X4, X0 \
ADDSD X5, X0 \
MOVSD X0, (Y_PTR) \
ADDQ $SIZE, A_PTR
#define KERNEL_1x1 \
MOVSD (Y_PTR), X0 \
MOVSD (A_PTR), X4 \
MULSD X8, X4 \
ADDSD X4, X0 \
MOVSD X0, (Y_PTR) \
ADDQ $SIZE, A_PTR
// SCALE_8 scales eight contiguous float64 values at (PTR) in place by
// SCAL. Clobbers X0-X3.
#define SCALE_8(PTR, SCAL) \
MOVUPS (PTR), X0 \
MOVUPS 16(PTR), X1 \
MOVUPS 32(PTR), X2 \
MOVUPS 48(PTR), X3 \
MULPD SCAL, X0 \
MULPD SCAL, X1 \
MULPD SCAL, X2 \
MULPD SCAL, X3 \
MOVUPS X0, (PTR) \
MOVUPS X1, 16(PTR) \
MOVUPS X2, 32(PTR) \
MOVUPS X3, 48(PTR)

// SCALE_4 scales four contiguous float64 values at (PTR) in place by
// SCAL. Clobbers X0-X1.
// Fix: removed the stray trailing '\' after the final MOVUPS, which
// spliced the following #define into this macro's body.
#define SCALE_4(PTR, SCAL) \
MOVUPS (PTR), X0 \
MOVUPS 16(PTR), X1 \
MULPD SCAL, X0 \
MULPD SCAL, X1 \
MOVUPS X0, (PTR) \
MOVUPS X1, 16(PTR)

// SCALE_2 scales two contiguous float64 values at (PTR) in place by
// SCAL. Clobbers X0.
// Fix: removed the stray trailing '\' after the final MOVUPS (it made
// the SCALE_1 #define part of this macro, leaving SCALE_1 undefined).
#define SCALE_2(PTR, SCAL) \
MOVUPS (PTR), X0 \
MULPD SCAL, X0 \
MOVUPS X0, (PTR)

// SCALE_1 scales one float64 value at (PTR) in place by SCAL. Clobbers X0.
// Fix: removed the stray trailing '\' after the final MOVSD.
#define SCALE_1(PTR, SCAL) \
MOVSD (PTR), X0 \
MULSD SCAL, X0 \
MOVSD X0, (PTR)

// SCALEINC_4 scales four strided float64 values in place by SCAL.
// INC is the byte stride; INC3 must equal 3*INC. Clobbers X0-X3.
#define SCALEINC_4(PTR, INC, INC3, SCAL) \
MOVSD (PTR), X0 \
MOVSD (PTR)(INC*1), X1 \
MOVSD (PTR)(INC*2), X2 \
MOVSD (PTR)(INC3*1), X3 \
MULSD SCAL, X0 \
MULSD SCAL, X1 \
MULSD SCAL, X2 \
MULSD SCAL, X3 \
MOVSD X0, (PTR) \
MOVSD X1, (PTR)(INC*1) \
MOVSD X2, (PTR)(INC*2) \
MOVSD X3, (PTR)(INC3*1)

// SCALEINC_2 scales two strided float64 values in place by SCAL.
// INC is the byte stride. Clobbers X0-X1.
#define SCALEINC_2(PTR, INC, SCAL) \
MOVSD (PTR), X0 \
MOVSD (PTR)(INC*1), X1 \
MULSD SCAL, X0 \
MULSD SCAL, X1 \
MOVSD X0, (PTR) \
MOVSD X1, (PTR)(INC*1)
// func GemvT(m, n int,
// alpha float64,
// a []float64, lda int,
// x []float64, incX int,
// beta float64,
// y []float64, incY int)
//
// Transpose matrix-vector kernel: scales y by beta, then accumulates
// alpha * Aᵀ * x into y, four columns of A per outer iteration.
// Register roles (aliases #defined earlier in this file, not shown):
//   M, N                 loop counters (M_DIM/N_DIM are the frame args)
//   A_ROW / A_PTR        anchor of the current band of A / walking pointer
//   X_PTR / Y_PTR        vector cursors; X and Y hold saved base pointers
//   LDA / LDA3           row stride of A in bytes, and 3x that
//   INC_X/INC3_X, INC_Y/INC3_Y  vector byte strides
//   ALPHA / BETA         scalars broadcast into both xmm lanes
TEXT ·GemvT(SB), NOSPLIT, $32-128
MOVQ M_DIM, M
MOVQ N_DIM, N
CMPQ M, $0 // if m == 0 { return }
JE end
CMPQ N, $0 // if n == 0 { return }
JE end
MOVDDUP alpha+16(FP), ALPHA
MOVQ x_base+56(FP), X_PTR
MOVQ y_base+96(FP), Y_PTR
MOVQ a_base+24(FP), A_ROW
MOVQ incY+120(FP), INC_Y // INC_Y = incY * sizeof(float64)
MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64)
SHLQ $3, LDA
LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3
MOVQ A_ROW, A_PTR
MOVQ incX+80(FP), INC_X // INC_X = incX * sizeof(float64)
// For negative incX, start X_PTR at the last element so stepping by the
// (negative) stride walks x front to back. INC_X is still in element
// units here (byte scaling happens below), hence the *SIZE in the LEAQ.
XORQ TMP2, TMP2
MOVQ N, TMP1
SUBQ $1, TMP1
NEGQ TMP1
IMULQ INC_X, TMP1 // TMP1 = -(N-1) * incX
CMPQ INC_X, $0
CMOVQLT TMP1, TMP2 // TMP2 = (N-1) * |incX| when incX < 0
LEAQ (X_PTR)(TMP2*SIZE), X_PTR
MOVQ X_PTR, X
SHLQ $3, INC_X
LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3
CMPQ incY+120(FP), $1 // Check for dense vector Y (fast-path)
JNE inc
// y preparation: beta == 1 is a no-op, beta == 0 clears, else scale.
MOVSD $1.0, X0
COMISD beta+88(FP), X0
JE gemv_start
MOVSD $0.0, X0
COMISD beta+88(FP), X0
JE gemv_clear
MOVDDUP beta+88(FP), BETA
// y *= beta, unrolled 8/4/2/1.
SHRQ $3, M
JZ scal4
scal8:
SCALE_8(Y_PTR, BETA)
ADDQ $8*SIZE, Y_PTR
DECQ M
JNZ scal8
scal4:
TESTQ $4, M_DIM
JZ scal2
SCALE_4(Y_PTR, BETA)
ADDQ $4*SIZE, Y_PTR
scal2:
TESTQ $2, M_DIM
JZ scal1
SCALE_2(Y_PTR, BETA)
ADDQ $2*SIZE, Y_PTR
scal1:
TESTQ $1, M_DIM
JZ prep_end
SCALE_1(Y_PTR, BETA)
JMP prep_end
gemv_clear: // beta == 0 is special cased to clear memory (no nan handling)
XORPS X0, X0
XORPS X1, X1
XORPS X2, X2
XORPS X3, X3
SHRQ $3, M
JZ clear4
clear8:
MOVUPS X0, (Y_PTR)
MOVUPS X1, 16(Y_PTR)
MOVUPS X2, 32(Y_PTR)
MOVUPS X3, 48(Y_PTR)
ADDQ $8*SIZE, Y_PTR
DECQ M
JNZ clear8
clear4:
TESTQ $4, M_DIM
JZ clear2
MOVUPS X0, (Y_PTR)
MOVUPS X1, 16(Y_PTR)
ADDQ $4*SIZE, Y_PTR
clear2:
TESTQ $2, M_DIM
JZ clear1
MOVUPS X0, (Y_PTR)
ADDQ $2*SIZE, Y_PTR
clear1:
TESTQ $1, M_DIM
JZ prep_end
MOVSD X0, (Y_PTR)
prep_end:
MOVQ Y, Y_PTR // rewind y cursor after the beta pass
MOVQ M_DIM, M
gemv_start:
// Main accumulation: 4 columns of A per c4 iteration, then 2, then 1.
SHRQ $2, N
JZ c2
c4:
// LOAD 4
INIT4
MOVQ M_DIM, M
SHRQ $2, M
JZ c4r2
c4r4:
// 4x4 KERNEL
KERNEL_LOAD4
KERNEL_4x4
STORE4
ADDQ $4*SIZE, Y_PTR
DECQ M
JNZ c4r4
c4r2:
TESTQ $2, M_DIM
JZ c4r1
// 4x2 KERNEL
KERNEL_LOAD2
KERNEL_2x4
STORE2
ADDQ $2*SIZE, Y_PTR
c4r1:
TESTQ $1, M_DIM
JZ c4end
// 4x1 KERNEL
KERNEL_1x4
ADDQ $SIZE, Y_PTR
c4end:
// Advance to the next 4-column band: bump x, rewind y, bump A anchor.
LEAQ (X_PTR)(INC_X*4), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*4), A_ROW
MOVQ A_ROW, A_PTR
DECQ N
JNZ c4
c2:
TESTQ $2, N_DIM
JZ c1
// LOAD 2
INIT2
MOVQ M_DIM, M
SHRQ $2, M
JZ c2r2
c2r4:
// 2x4 KERNEL
KERNEL_LOAD4
KERNEL_4x2
STORE4
ADDQ $4*SIZE, Y_PTR
DECQ M
JNZ c2r4
c2r2:
TESTQ $2, M_DIM
JZ c2r1
// 2x2 KERNEL
KERNEL_LOAD2
KERNEL_2x2
STORE2
ADDQ $2*SIZE, Y_PTR
c2r1:
TESTQ $1, M_DIM
JZ c2end
// 2x1 KERNEL
KERNEL_1x2
ADDQ $SIZE, Y_PTR
c2end:
LEAQ (X_PTR)(INC_X*2), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*2), A_ROW
MOVQ A_ROW, A_PTR
c1:
TESTQ $1, N_DIM
JZ end
// LOAD 1
INIT1
MOVQ M_DIM, M
SHRQ $2, M
JZ c1r2
c1r4:
// 1x4 KERNEL
KERNEL_LOAD4
KERNEL_4x1
STORE4
ADDQ $4*SIZE, Y_PTR
DECQ M
JNZ c1r4
c1r2:
TESTQ $2, M_DIM
JZ c1r1
// 1x2 KERNEL
KERNEL_LOAD2
KERNEL_2x1
STORE2
ADDQ $2*SIZE, Y_PTR
c1r1:
TESTQ $1, M_DIM
JZ end
// 1x1 KERNEL
KERNEL_1x1
end:
RET
inc: // Algorithm for incY != 1 ( split loads in kernel )
// For negative incY, start Y_PTR at the last element (INC_Y is still in
// element units here; byte scaling happens below).
XORQ TMP2, TMP2
MOVQ M, TMP1
SUBQ $1, TMP1
IMULQ INC_Y, TMP1
NEGQ TMP1 // TMP1 = -(M-1) * incY
CMPQ INC_Y, $0
CMOVQLT TMP1, TMP2
LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR
MOVQ Y_PTR, Y
SHLQ $3, INC_Y
LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3
// Same beta preparation as the dense path, with strided stores.
MOVSD $1.0, X0
COMISD beta+88(FP), X0
JE inc_gemv_start
MOVSD $0.0, X0
COMISD beta+88(FP), X0
JE inc_gemv_clear
MOVDDUP beta+88(FP), BETA
SHRQ $2, M
JZ inc_scal2
inc_scal4:
SCALEINC_4(Y_PTR, INC_Y, INC3_Y, BETA)
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ M
JNZ inc_scal4
inc_scal2:
TESTQ $2, M_DIM
JZ inc_scal1
SCALEINC_2(Y_PTR, INC_Y, BETA)
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_scal1:
TESTQ $1, M_DIM
JZ inc_prep_end
SCALE_1(Y_PTR, BETA)
JMP inc_prep_end
inc_gemv_clear: // beta == 0 is special-cased to clear memory (no nan handling)
XORPS X0, X0
XORPS X1, X1
XORPS X2, X2
XORPS X3, X3
SHRQ $2, M
JZ inc_clear2
inc_clear4:
MOVSD X0, (Y_PTR)
MOVSD X1, (Y_PTR)(INC_Y*1)
MOVSD X2, (Y_PTR)(INC_Y*2)
MOVSD X3, (Y_PTR)(INC3_Y*1)
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ M
JNZ inc_clear4
inc_clear2:
TESTQ $2, M_DIM
JZ inc_clear1
MOVSD X0, (Y_PTR)
MOVSD X1, (Y_PTR)(INC_Y*1)
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_clear1:
TESTQ $1, M_DIM
JZ inc_prep_end
MOVSD X0, (Y_PTR)
inc_prep_end:
MOVQ Y, Y_PTR // rewind y cursor after the beta pass
MOVQ M_DIM, M
inc_gemv_start:
// Same band structure as the dense path, using *_INC load/store macros.
SHRQ $2, N
JZ inc_c2
inc_c4:
// LOAD 4
INIT4
MOVQ M_DIM, M
SHRQ $2, M
JZ inc_c4r2
inc_c4r4:
// 4x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_4x4
STORE4_INC
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ M
JNZ inc_c4r4
inc_c4r2:
TESTQ $2, M_DIM
JZ inc_c4r1
// 4x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_2x4
STORE2_INC
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_c4r1:
TESTQ $1, M_DIM
JZ inc_c4end
// 4x1 KERNEL
KERNEL_1x4
ADDQ INC_Y, Y_PTR
inc_c4end:
LEAQ (X_PTR)(INC_X*4), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*4), A_ROW
MOVQ A_ROW, A_PTR
DECQ N
JNZ inc_c4
inc_c2:
TESTQ $2, N_DIM
JZ inc_c1
// LOAD 2
INIT2
MOVQ M_DIM, M
SHRQ $2, M
JZ inc_c2r2
inc_c2r4:
// 2x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_4x2
STORE4_INC
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ M
JNZ inc_c2r4
inc_c2r2:
TESTQ $2, M_DIM
JZ inc_c2r1
// 2x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_2x2
STORE2_INC
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_c2r1:
TESTQ $1, M_DIM
JZ inc_c2end
// 2x1 KERNEL
KERNEL_1x2
ADDQ INC_Y, Y_PTR
inc_c2end:
LEAQ (X_PTR)(INC_X*2), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*2), A_ROW
MOVQ A_ROW, A_PTR
inc_c1:
TESTQ $1, N_DIM
JZ inc_end
// LOAD 1
INIT1
MOVQ M_DIM, M
SHRQ $2, M
JZ inc_c1r2
inc_c1r4:
// 1x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_4x1
STORE4_INC
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ M
JNZ inc_c1r4
inc_c1r2:
TESTQ $2, M_DIM
JZ inc_c1r1
// 1x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_2x1
STORE2_INC
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_c1r1:
TESTQ $1, M_DIM
JZ inc_end
// 1x1 KERNEL
KERNEL_1x1
inc_end:
RET

591
vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,591 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// Constant and register aliases for the Ger kernel (A += alpha * x * yᵀ).
// SIZE is sizeof(float64).
#define SIZE 8
#define M_DIM m+0(FP)
#define M CX
#define N_DIM n+8(FP)
#define N BX
// Scratch registers used for negative-stride bookkeeping.
#define TMP1 R14
#define TMP2 R15
// x cursor, and y anchor (frame slot) / cursor.
#define X_PTR SI
#define Y y_base+56(FP)
#define Y_PTR DX
// Anchor of the current row band of A, and the walking pointer within it.
#define A_ROW AX
#define A_PTR DI
// Byte strides of x and y; the *3 variants serve the 4-wide kernels.
#define INC_X R8
#define INC3_X R9
#define INC_Y R10
#define INC3_Y R11
// Row stride of A in bytes, and 3x that.
#define LDA R12
#define LDA3 R13
// alpha broadcast into both lanes of X0 (clobbered by STORE_4x4; reloaded).
#define ALPHA X0
// LOAD4 broadcasts four strided x elements into X1-X4 and pre-multiplies
// each by alpha (INC_X is a byte stride, INC3_X = 3*INC_X). The
// prefetch pulls the x block for the next band.
#define LOAD4 \
PREFETCHNTA (X_PTR )(INC_X*8) \
MOVDDUP (X_PTR), X1 \
MOVDDUP (X_PTR)(INC_X*1), X2 \
MOVDDUP (X_PTR)(INC_X*2), X3 \
MOVDDUP (X_PTR)(INC3_X*1), X4 \
MULPD ALPHA, X1 \
MULPD ALPHA, X2 \
MULPD ALPHA, X3 \
MULPD ALPHA, X4
// LOAD2 broadcasts alpha*x[0], alpha*x[1] into X1, X2.
#define LOAD2 \
MOVDDUP (X_PTR), X1 \
MOVDDUP (X_PTR)(INC_X*1), X2 \
MULPD ALPHA, X1 \
MULPD ALPHA, X2
// LOAD1 broadcasts alpha*x[0] into X1.
#define LOAD1 \
MOVDDUP (X_PTR), X1 \
MULPD ALPHA, X1
// KERNEL_LOAD4 loads four contiguous elements of y into X5:X6.
#define KERNEL_LOAD4 \
MOVUPS (Y_PTR), X5 \
MOVUPS 2*SIZE(Y_PTR), X6
// KERNEL_LOAD4_INC loads four strided elements of y into X5:X6.
#define KERNEL_LOAD4_INC \
MOVLPD (Y_PTR), X5 \
MOVHPD (Y_PTR)(INC_Y*1), X5 \
MOVLPD (Y_PTR)(INC_Y*2), X6 \
MOVHPD (Y_PTR)(INC3_Y*1), X6
// KERNEL_LOAD2 loads two contiguous elements of y into X5.
#define KERNEL_LOAD2 \
MOVUPS (Y_PTR), X5
// KERNEL_LOAD2_INC loads two strided elements of y into X5.
#define KERNEL_LOAD2_INC \
MOVLPD (Y_PTR), X5 \
MOVHPD (Y_PTR)(INC_Y*1), X5
// KERNEL_4x4 forms the 4x4 outer-product tile: replicates the y pair
// X5:X6 and multiplies each copy by one alpha*x broadcast (X1-X4),
// leaving the four row contributions in X5:X6 .. X11:X12.
#define KERNEL_4x4 \
MOVUPS X5, X7 \
MOVUPS X6, X8 \
MOVUPS X5, X9 \
MOVUPS X6, X10 \
MOVUPS X5, X11 \
MOVUPS X6, X12 \
MULPD X1, X5 \
MULPD X1, X6 \
MULPD X2, X7 \
MULPD X2, X8 \
MULPD X3, X9 \
MULPD X3, X10 \
MULPD X4, X11 \
MULPD X4, X12
// STORE_4x4 adds the 4x4 tile into four rows of A and advances A_PTR by
// four elements. NOTE: it uses X0 (ALPHA) as scratch — callers reload
// ALPHA after the loop that expands this macro.
#define STORE_4x4 \
MOVUPS (A_PTR), X13 \
ADDPD X13, X5 \
MOVUPS 2*SIZE(A_PTR), X14 \
ADDPD X14, X6 \
MOVUPS (A_PTR)(LDA*1), X15 \
ADDPD X15, X7 \
MOVUPS 2*SIZE(A_PTR)(LDA*1), X0 \
ADDPD X0, X8 \
MOVUPS (A_PTR)(LDA*2), X13 \
ADDPD X13, X9 \
MOVUPS 2*SIZE(A_PTR)(LDA*2), X14 \
ADDPD X14, X10 \
MOVUPS (A_PTR)(LDA3*1), X15 \
ADDPD X15, X11 \
MOVUPS 2*SIZE(A_PTR)(LDA3*1), X0 \
ADDPD X0, X12 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, 2*SIZE(A_PTR) \
MOVUPS X7, (A_PTR)(LDA*1) \
MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \
MOVUPS X9, (A_PTR)(LDA*2) \
MOVUPS X10, 2*SIZE(A_PTR)(LDA*2) \
MOVUPS X11, (A_PTR)(LDA3*1) \
MOVUPS X12, 2*SIZE(A_PTR)(LDA3*1) \
ADDQ $4*SIZE, A_PTR
// KERNEL_4x2: 4 rows x 2 columns; y pair X5 times alpha*x in X1-X4.
#define KERNEL_4x2 \
MOVUPS X5, X6 \
MOVUPS X5, X7 \
MOVUPS X5, X8 \
MULPD X1, X5 \
MULPD X2, X6 \
MULPD X3, X7 \
MULPD X4, X8
// STORE_4x2 adds the 4x2 tile into A and advances A_PTR by two elements.
#define STORE_4x2 \
MOVUPS (A_PTR), X9 \
ADDPD X9, X5 \
MOVUPS (A_PTR)(LDA*1), X10 \
ADDPD X10, X6 \
MOVUPS (A_PTR)(LDA*2), X11 \
ADDPD X11, X7 \
MOVUPS (A_PTR)(LDA3*1), X12 \
ADDPD X12, X8 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, (A_PTR)(LDA*1) \
MOVUPS X7, (A_PTR)(LDA*2) \
MOVUPS X8, (A_PTR)(LDA3*1) \
ADDQ $2*SIZE, A_PTR
// KERNEL_4x1: 4 rows x 1 column; loads the y scalar itself.
#define KERNEL_4x1 \
MOVSD (Y_PTR), X5 \
MOVSD X5, X6 \
MOVSD X5, X7 \
MOVSD X5, X8 \
MULSD X1, X5 \
MULSD X2, X6 \
MULSD X3, X7 \
MULSD X4, X8
// STORE_4x1 adds the 4x1 column into A and advances A_PTR by one element.
#define STORE_4x1 \
ADDSD (A_PTR), X5 \
ADDSD (A_PTR)(LDA*1), X6 \
ADDSD (A_PTR)(LDA*2), X7 \
ADDSD (A_PTR)(LDA3*1), X8 \
MOVSD X5, (A_PTR) \
MOVSD X6, (A_PTR)(LDA*1) \
MOVSD X7, (A_PTR)(LDA*2) \
MOVSD X8, (A_PTR)(LDA3*1) \
ADDQ $SIZE, A_PTR
// KERNEL_2x4: 2 rows x 4 columns; y in X5:X6, alpha*x rows in X1, X2.
#define KERNEL_2x4 \
MOVUPS X5, X7 \
MOVUPS X6, X8 \
MULPD X1, X5 \
MULPD X1, X6 \
MULPD X2, X7 \
MULPD X2, X8
// STORE_2x4 adds the 2x4 tile into A and advances A_PTR by four elements.
#define STORE_2x4 \
MOVUPS (A_PTR), X9 \
ADDPD X9, X5 \
MOVUPS 2*SIZE(A_PTR), X10 \
ADDPD X10, X6 \
MOVUPS (A_PTR)(LDA*1), X11 \
ADDPD X11, X7 \
MOVUPS 2*SIZE(A_PTR)(LDA*1), X12 \
ADDPD X12, X8 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, 2*SIZE(A_PTR) \
MOVUPS X7, (A_PTR)(LDA*1) \
MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \
ADDQ $4*SIZE, A_PTR
// KERNEL_2x2: 2 rows x 2 columns.
#define KERNEL_2x2 \
MOVUPS X5, X6 \
MULPD X1, X5 \
MULPD X2, X6
// STORE_2x2 adds the 2x2 tile into A and advances A_PTR by two elements.
#define STORE_2x2 \
MOVUPS (A_PTR), X7 \
ADDPD X7, X5 \
MOVUPS (A_PTR)(LDA*1), X8 \
ADDPD X8, X6 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, (A_PTR)(LDA*1) \
ADDQ $2*SIZE, A_PTR
// KERNEL_2x1: 2 rows x 1 column; loads the y scalar itself.
#define KERNEL_2x1 \
MOVSD (Y_PTR), X5 \
MOVSD X5, X6 \
MULSD X1, X5 \
MULSD X2, X6
// STORE_2x1 adds the 2x1 column into A and advances A_PTR by one element.
#define STORE_2x1 \
ADDSD (A_PTR), X5 \
ADDSD (A_PTR)(LDA*1), X6 \
MOVSD X5, (A_PTR) \
MOVSD X6, (A_PTR)(LDA*1) \
ADDQ $SIZE, A_PTR
// KERNEL_1x4: 1 row x 4 columns; y already loaded in X5:X6 by KERNEL_LOAD4.
#define KERNEL_1x4 \
MULPD X1, X5 \
MULPD X1, X6
// STORE_1x4 adds the 1x4 row into A and advances A_PTR by four elements.
#define STORE_1x4 \
MOVUPS (A_PTR), X7 \
ADDPD X7, X5 \
MOVUPS 2*SIZE(A_PTR), X8 \
ADDPD X8, X6 \
MOVUPS X5, (A_PTR) \
MOVUPS X6, 2*SIZE(A_PTR) \
ADDQ $4*SIZE, A_PTR
// KERNEL_1x2: 1 row x 2 columns; y already loaded in X5.
#define KERNEL_1x2 \
MULPD X1, X5
// STORE_1x2 adds the 1x2 row into A and advances A_PTR by two elements.
#define STORE_1x2 \
MOVUPS (A_PTR), X6 \
ADDPD X6, X5 \
MOVUPS X5, (A_PTR) \
ADDQ $2*SIZE, A_PTR
// KERNEL_1x1: single element; loads the y scalar itself.
#define KERNEL_1x1 \
MOVSD (Y_PTR), X5 \
MULSD X1, X5
// STORE_1x1 adds the scalar product into A and advances A_PTR.
#define STORE_1x1 \
ADDSD (A_PTR), X5 \
MOVSD X5, (A_PTR) \
ADDQ $SIZE, A_PTR
// func Ger(m, n uintptr, alpha float64,
// x []float64, incX uintptr,
// y []float64, incY uintptr,
// a []float64, lda uintptr)
//
// Rank-one update A += alpha * x * yᵀ, processed as bands of 4 rows of A,
// 4 columns at a time: LOAD* caches alpha*x for the band in X1-X4,
// KERNEL_LOAD*/KERNEL_* form the outer-product tile from y, and STORE_*
// adds the tile into A.
TEXT ·Ger(SB), NOSPLIT, $0
MOVQ M_DIM, M
MOVQ N_DIM, N
CMPQ M, $0 // if m == 0 { return }
JE end
CMPQ N, $0 // if n == 0 { return }
JE end
MOVDDUP alpha+16(FP), ALPHA
MOVQ x_base+24(FP), X_PTR
MOVQ y_base+56(FP), Y_PTR
MOVQ a_base+88(FP), A_ROW
MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float64)
SHLQ $3, INC_X
MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float64)
SHLQ $3, LDA
LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3
LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3
MOVQ A_ROW, A_PTR
// Negative-stride adjustment for x.
// NOTE(review): INC_X is already a byte stride here and TMP2 is scaled
// by SIZE again in the LEAQ below; also incX is a uintptr, so the
// CMOVQLT only fires for values with the sign bit set. Confirm callers
// never pass negative strides through this entry point.
XORQ TMP2, TMP2
MOVQ M, TMP1
SUBQ $1, TMP1
IMULQ INC_X, TMP1
NEGQ TMP1
CMPQ INC_X, $0
CMOVQLT TMP1, TMP2
LEAQ (X_PTR)(TMP2*SIZE), X_PTR
CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path)
JG inc
JL end // incY < 1: nothing to do
SHRQ $2, M // process A in bands of 4 rows
JZ r2
r4:
// LOAD 4
LOAD4
MOVQ N_DIM, N
SHRQ $2, N
JZ r4c2
r4c4:
// 4x4 KERNEL
KERNEL_LOAD4
KERNEL_4x4
STORE_4x4
ADDQ $4*SIZE, Y_PTR
DECQ N
JNZ r4c4
// Reload ALPHA after it's clobbered by STORE_4x4
MOVDDUP alpha+16(FP), ALPHA
r4c2:
TESTQ $2, N_DIM
JZ r4c1
// 4x2 KERNEL
KERNEL_LOAD2
KERNEL_4x2
STORE_4x2
ADDQ $2*SIZE, Y_PTR
r4c1:
TESTQ $1, N_DIM
JZ r4end
// 4x1 KERNEL
KERNEL_4x1
STORE_4x1
ADDQ $SIZE, Y_PTR
r4end:
// Next band: advance x, rewind y, move the A anchor down 4 rows.
LEAQ (X_PTR)(INC_X*4), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*4), A_ROW
MOVQ A_ROW, A_PTR
DECQ M
JNZ r4
r2:
TESTQ $2, M_DIM
JZ r1
// LOAD 2
LOAD2
MOVQ N_DIM, N
SHRQ $2, N
JZ r2c2
r2c4:
// 2x4 KERNEL
KERNEL_LOAD4
KERNEL_2x4
STORE_2x4
ADDQ $4*SIZE, Y_PTR
DECQ N
JNZ r2c4
r2c2:
TESTQ $2, N_DIM
JZ r2c1
// 2x2 KERNEL
KERNEL_LOAD2
KERNEL_2x2
STORE_2x2
ADDQ $2*SIZE, Y_PTR
r2c1:
TESTQ $1, N_DIM
JZ r2end
// 2x1 KERNEL
KERNEL_2x1
STORE_2x1
ADDQ $SIZE, Y_PTR
r2end:
LEAQ (X_PTR)(INC_X*2), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*2), A_ROW
MOVQ A_ROW, A_PTR
r1:
TESTQ $1, M_DIM
JZ end
// LOAD 1
LOAD1
MOVQ N_DIM, N
SHRQ $2, N
JZ r1c2
r1c4:
// 1x4 KERNEL
KERNEL_LOAD4
KERNEL_1x4
STORE_1x4
ADDQ $4*SIZE, Y_PTR
DECQ N
JNZ r1c4
r1c2:
TESTQ $2, N_DIM
JZ r1c1
// 1x2 KERNEL
KERNEL_LOAD2
KERNEL_1x2
STORE_1x2
ADDQ $2*SIZE, Y_PTR
r1c1:
TESTQ $1, N_DIM
JZ end
// 1x1 KERNEL
KERNEL_1x1
STORE_1x1
ADDQ $SIZE, Y_PTR
end:
RET
inc: // Algorithm for incY != 1 ( split loads in kernel )
MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float64)
SHLQ $3, INC_Y
LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3
// Negative-stride adjustment for y.
// NOTE(review): same double-scaling concern as for x above (INC_Y is
// already in bytes when multiplied into TMP1, then TMP2 is scaled by
// SIZE); only reachable with sign-bit-set uintptr strides — confirm.
XORQ TMP2, TMP2
MOVQ N, TMP1
SUBQ $1, TMP1
IMULQ INC_Y, TMP1
NEGQ TMP1
CMPQ INC_Y, $0
CMOVQLT TMP1, TMP2
LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR
SHRQ $2, M
JZ inc_r2
inc_r4:
// LOAD 4
LOAD4
MOVQ N_DIM, N
SHRQ $2, N
JZ inc_r4c2
inc_r4c4:
// 4x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_4x4
STORE_4x4
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ N
JNZ inc_r4c4
// Reload ALPHA after it's clobbered by STORE_4x4
MOVDDUP alpha+16(FP), ALPHA
inc_r4c2:
TESTQ $2, N_DIM
JZ inc_r4c1
// 4x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_4x2
STORE_4x2
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_r4c1:
TESTQ $1, N_DIM
JZ inc_r4end
// 4x1 KERNEL
KERNEL_4x1
STORE_4x1
ADDQ INC_Y, Y_PTR
inc_r4end:
LEAQ (X_PTR)(INC_X*4), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*4), A_ROW
MOVQ A_ROW, A_PTR
DECQ M
JNZ inc_r4
inc_r2:
TESTQ $2, M_DIM
JZ inc_r1
// LOAD 2
LOAD2
MOVQ N_DIM, N
SHRQ $2, N
JZ inc_r2c2
inc_r2c4:
// 2x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_2x4
STORE_2x4
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ N
JNZ inc_r2c4
inc_r2c2:
TESTQ $2, N_DIM
JZ inc_r2c1
// 2x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_2x2
STORE_2x2
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_r2c1:
TESTQ $1, N_DIM
JZ inc_r2end
// 2x1 KERNEL
KERNEL_2x1
STORE_2x1
ADDQ INC_Y, Y_PTR
inc_r2end:
LEAQ (X_PTR)(INC_X*2), X_PTR
MOVQ Y, Y_PTR
LEAQ (A_ROW)(LDA*2), A_ROW
MOVQ A_ROW, A_PTR
inc_r1:
TESTQ $1, M_DIM
JZ end
// LOAD 1
LOAD1
MOVQ N_DIM, N
SHRQ $2, N
JZ inc_r1c2
inc_r1c4:
// 1x4 KERNEL
KERNEL_LOAD4_INC
KERNEL_1x4
STORE_1x4
LEAQ (Y_PTR)(INC_Y*4), Y_PTR
DECQ N
JNZ inc_r1c4
inc_r1c2:
TESTQ $2, N_DIM
JZ inc_r1c1
// 1x2 KERNEL
KERNEL_LOAD2_INC
KERNEL_1x2
STORE_1x2
LEAQ (Y_PTR)(INC_Y*2), Y_PTR
inc_r1c1:
TESTQ $1, N_DIM
JZ end
// 1x1 KERNEL
KERNEL_1x1
STORE_1x1
ADDQ INC_Y, Y_PTR
inc_end:
RET

View File

@@ -0,0 +1,58 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func L1Dist(s, t []float64) float64
//
// Returns sum(|t[i] - s[i]|) over i < min(len(s), len(t)).
// The packed accumulator X3 holds two partial sums that are folded
// together at l1_end.
TEXT ·L1Dist(SB), NOSPLIT, $0
MOVQ s_base+0(FP), DI // DI = &s
MOVQ t_base+24(FP), SI // SI = &t
MOVQ s_len+8(FP), CX // CX = len(s)
CMPQ t_len+32(FP), CX // CX = min( CX, len(t) )
CMOVQLE t_len+32(FP), CX
PXOR X3, X3 // norm = 0
CMPQ CX, $0 // if CX == 0 { return 0 }
JE l1_end
XORQ AX, AX // i = 0
MOVQ CX, BX
ANDQ $1, BX // BX = CX % 2
SHRQ $1, CX // CX = floor( CX / 2 )
JZ l1_tail_start // if CX == 0 { goto l1_tail_start }
l1_loop: // Loop unrolled 2x do {
MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1]
MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1]
MOVAPS X0, X2
SUBPD X1, X0
SUBPD X2, X1
MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 )
ADDPD X0, X3 // norm += X0
ADDQ $2, AX // i += 2
LOOP l1_loop // } while --CX > 0
CMPQ BX, $0 // if BX == 0 { return }
JE l1_end
l1_tail_start: // Reset loop registers
MOVQ BX, CX // Loop counter: CX = BX
PXOR X0, X0 // reset X0, X1 to break dependencies
PXOR X1, X1
l1_tail: // handle the odd final element
MOVSD (SI)(AX*8), X0 // X0 = t[i]
MOVSD (DI)(AX*8), X1 // X1 = s[i]
MOVAPD X0, X2
SUBSD X1, X0
SUBSD X2, X1
MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 )
ADDSD X0, X3 // norm += X0
l1_end: // fold the two packed partial sums
MOVAPS X3, X2
SHUFPD $1, X2, X2
ADDSD X3, X2 // X2 = X3[1] + X3[0]
MOVSD X2, ret+48(FP) // return X2
RET

View File

@@ -0,0 +1,57 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// func LinfDist(s, t []float64) float64
//
// Returns max(|t[i] - s[i]|) over i < min(len(s), len(t)).
// Structure mirrors L1Dist with ADDPD/ADDSD replaced by MAXPD/MAXSD.
// Labels renamed from the copied l1_* names to linf_* for clarity;
// the wrong "max( CX, len(t) )" comment is corrected: CMOVQLE takes
// the smaller length.
TEXT ·LinfDist(SB), NOSPLIT, $0
MOVQ s_base+0(FP), DI // DI = &s
MOVQ t_base+24(FP), SI // SI = &t
MOVQ s_len+8(FP), CX // CX = len(s)
CMPQ t_len+32(FP), CX // CX = min( CX, len(t) )
CMOVQLE t_len+32(FP), CX
PXOR X3, X3 // norm = 0
CMPQ CX, $0 // if CX == 0 { return 0 }
JE linf_end
XORQ AX, AX // i = 0
MOVQ CX, BX
ANDQ $1, BX // BX = CX % 2
SHRQ $1, CX // CX = floor( CX / 2 )
JZ linf_tail_start // if CX == 0 { goto linf_tail_start }
linf_loop: // Loop unrolled 2x do {
MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1]
MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1]
MOVAPS X0, X2
SUBPD X1, X0
SUBPD X2, X1
MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 )
MAXPD X0, X3 // norm = max( norm, X0 )
ADDQ $2, AX // i += 2
LOOP linf_loop // } while --CX > 0
CMPQ BX, $0 // if BX == 0 { return }
JE linf_end
linf_tail_start: // Reset loop registers
MOVQ BX, CX // Loop counter: CX = BX
PXOR X0, X0 // reset X0, X1 to break dependencies
PXOR X1, X1
linf_tail: // handle the odd final element
MOVSD (SI)(AX*8), X0 // X0 = t[i]
MOVSD (DI)(AX*8), X1 // X1 = s[i]
MOVAPD X0, X2
SUBSD X1, X0
SUBSD X2, X1
MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 )
MAXSD X0, X3 // norm = max( norm, X0 )
linf_end: // fold the two packed partial maxima
MOVAPS X3, X2
SHUFPD $1, X2, X2
MAXSD X3, X2 // X2 = max( X3[1], X3[0] )
MOVSD X2, ret+48(FP) // return X2
RET

57
vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go generated vendored Normal file
View File

@@ -0,0 +1,57 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package f64
// ScalUnitary is
// for i := range x {
// 	x[i] *= alpha
// }
//
// It scales every element of x in place by alpha.
func ScalUnitary(alpha float64, x []float64) {
	for i := 0; i < len(x); i++ {
		x[i] *= alpha
	}
}
// ScalUnitaryTo is
// for i, v := range x {
// 	dst[i] = alpha * v
// }
//
// It writes alpha*x into dst; dst must be at least as long as x.
func ScalUnitaryTo(dst []float64, alpha float64, x []float64) {
	for i := 0; i < len(x); i++ {
		dst[i] = alpha * x[i]
	}
}
// ScalInc is
// var ix uintptr
// for i := 0; i < int(n); i++ {
// 	x[ix] *= alpha
// 	ix += incX
// }
//
// It scales n elements of x in place, stepping incX elements at a time.
func ScalInc(alpha float64, x []float64, n, incX uintptr) {
	for i, ix := 0, uintptr(0); i < int(n); i, ix = i+1, ix+incX {
		x[ix] *= alpha
	}
}
// ScalIncTo is
// var idst, ix uintptr
// for i := 0; i < int(n); i++ {
// 	dst[idst] = alpha * x[ix]
// 	ix += incX
// 	idst += incDst
// }
//
// It writes n scaled elements of x into dst, with independent strides.
func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) {
	ix, idst := uintptr(0), uintptr(0)
	for count := int(n); count > 0; count-- {
		dst[idst] = alpha * x[ix]
		ix += incX
		idst += incDst
	}
}

View File

@@ -0,0 +1,113 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// Register aliases for ScalInc.
#define X_PTR SI
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R9
#define ALPHA X0
#define ALPHA_2 X1
// func ScalInc(alpha float64, x []float64, n, incX uintptr)
//
// Scales n strided elements of x in place by alpha, unrolled 4x.
// ALPHA_2 duplicates ALPHA so alternating multiplies avoid a single-
// register dependency chain. Clobbers X2-X5 and the aliased registers.
TEXT ·ScalInc(SB), NOSPLIT, $0
MOVSD alpha+0(FP), ALPHA // ALPHA = alpha
MOVQ x_base+8(FP), X_PTR // X_PTR = &x
MOVQ incX+40(FP), INC_X // INC_X = incX
SHLQ $3, INC_X // INC_X *= sizeof(float64)
MOVQ n+32(FP), LEN // LEN = n
CMPQ LEN, $0
JE end // if LEN == 0 { return }
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4
SHRQ $2, LEN // LEN = floor( LEN / 4 )
JZ tail_start // if LEN == 0 { goto tail_start }
MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
loop: // do { // x[i] *= alpha unrolled 4x.
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MOVSD (X_PTR)(INC_X*2), X4
MOVSD (X_PTR)(INCx3_X*1), X5
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA_2, X3
MULSD ALPHA, X4
MULSD ALPHA_2, X5
MOVSD X2, (X_PTR) // x[i] = X_i
MOVSD X3, (X_PTR)(INC_X*1)
MOVSD X4, (X_PTR)(INC_X*2)
MOVSD X5, (X_PTR)(INCx3_X*1)
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
DECQ LEN
JNZ loop // } while --LEN > 0
CMPQ TAIL, $0
JE end // if TAIL == 0 { return }
tail_start: // Reset loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( LEN / 2 )
JZ tail_one
tail_two: // TAIL < 4, so one two-element pass suffices (no back-branch)
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA, X3
MOVSD X2, (X_PTR) // x[i] = X_i
MOVSD X3, (X_PTR)(INC_X*1)
LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2])
ANDQ $1, TAIL
JZ end
tail_one:
MOVSD (X_PTR), X2 // X_i = x[i]
MULSD ALPHA, X2 // X_i *= ALPHA
MOVSD X2, (X_PTR) // x[i] = X_i
end:
RET

View File

@@ -0,0 +1,122 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// Register aliases for ScalIncTo.
#define X_PTR SI
#define DST_PTR DI
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R9
#define INC_DST R10
#define INCx3_DST R11
#define ALPHA X0
#define ALPHA_2 X1
// func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr)
//
// Writes alpha times n strided elements of x into dst (independent
// strides), unrolled 4x. Clobbers X2-X5 and the aliased registers.
TEXT ·ScalIncTo(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst
MOVQ incDst+24(FP), INC_DST // INC_DST = incDst
SHLQ $3, INC_DST // INC_DST *= sizeof(float64)
MOVSD alpha+32(FP), ALPHA // ALPHA = alpha
MOVQ x_base+40(FP), X_PTR // X_PTR = &x
MOVQ n+64(FP), LEN // LEN = n
MOVQ incX+72(FP), INC_X // INC_X = incX
SHLQ $3, INC_X // INC_X *= sizeof(float64)
CMPQ LEN, $0
JE end // if LEN == 0 { return }
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4
SHRQ $2, LEN // LEN = floor( LEN / 4 )
JZ tail_start // if LEN == 0 { goto tail_start }
MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3
loop: // do { // x[i] *= alpha unrolled 4x.
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MOVSD (X_PTR)(INC_X*2), X4
MOVSD (X_PTR)(INCx3_X*1), X5
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA_2, X3
MULSD ALPHA, X4
MULSD ALPHA_2, X5
MOVSD X2, (DST_PTR) // dst[i] = X_i
MOVSD X3, (DST_PTR)(INC_DST*1)
MOVSD X4, (DST_PTR)(INC_DST*2)
MOVSD X5, (DST_PTR)(INCx3_DST*1)
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4])
DECQ LEN
JNZ loop // } while --LEN > 0
CMPQ TAIL, $0
JE end // if TAIL == 0 { return }
tail_start: // Reset loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( LEN / 2 )
JZ tail_one
tail_two: // TAIL < 4, so one two-element pass suffices (no back-branch)
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA, X3
MOVSD X2, (DST_PTR) // dst[i] = X_i
MOVSD X3, (DST_PTR)(INC_DST*1)
LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2])
LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2])
ANDQ $1, TAIL
JZ end
tail_one:
MOVSD (X_PTR), X2 // X_i = x[i]
MULSD ALPHA, X2 // X_i *= ALPHA
MOVSD X2, (DST_PTR) // x[i] = X_i
end:
RET

View File

@@ -0,0 +1,112 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//+build !noasm,!appengine,!safe
#include "textflag.h"
// Hand-encoded MOVDDUP of the alpha argument (8(SP) == alpha+0(FP) for a
// NOSPLIT $0 frame) into X0, broadcasting it to both lanes.
#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // @ MOVDDUP XMM0, 8[RSP]
// Register aliases for ScalUnitary. (DST_PTR is unused here.)
#define X_PTR SI
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1
// func ScalUnitary(alpha float64, x []float64)
//
// Scales every element of x in place by alpha, packed and unrolled 8x
// (four 2-wide MULPDs per iteration). Clobbers X2-X5 and the aliases.
TEXT ·ScalUnitary(SB), NOSPLIT, $0
MOVDDUP_ALPHA // ALPHA = { alpha, alpha }
MOVQ x_base+8(FP), X_PTR // X_PTR = &x
MOVQ x_len+16(FP), LEN // LEN = len(x)
CMPQ LEN, $0
JE end // if LEN == 0 { return }
XORQ IDX, IDX // IDX = 0
MOVQ LEN, TAIL
ANDQ $7, TAIL // TAIL = LEN % 8
SHRQ $3, LEN // LEN = floor( LEN / 8 )
JZ tail_start // if LEN == 0 { goto tail_start }
MOVUPS ALPHA, ALPHA_2
loop: // do { // x[i] *= alpha unrolled 8x.
MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
MOVUPS 16(X_PTR)(IDX*8), X3
MOVUPS 32(X_PTR)(IDX*8), X4
MOVUPS 48(X_PTR)(IDX*8), X5
MULPD ALPHA, X2 // X_i *= ALPHA
MULPD ALPHA_2, X3
MULPD ALPHA, X4
MULPD ALPHA_2, X5
MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i
MOVUPS X3, 16(X_PTR)(IDX*8)
MOVUPS X4, 32(X_PTR)(IDX*8)
MOVUPS X5, 48(X_PTR)(IDX*8)
ADDQ $8, IDX // i += 8
DECQ LEN
JNZ loop // while --LEN > 0
CMPQ TAIL, $0
JE end // if TAIL == 0 { return }
tail_start: // Reset loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( TAIL / 2 )
JZ tail_one // if n == 0 goto end
tail_two: // do {
MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
MULPD ALPHA, X2 // X_i *= ALPHA
MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i
ADDQ $2, IDX // i += 2
DECQ LEN
JNZ tail_two // while --LEN > 0
ANDQ $1, TAIL
JZ end // if TAIL == 0 { return }
tail_one:
// x[i] *= alpha for the remaining element.
MOVSD (X_PTR)(IDX*8), X2
MULSD ALPHA, X2
MOVSD X2, (X_PTR)(IDX*8)
end:
RET

View File

@@ -0,0 +1,113 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//+build !noasm,!appengine,!safe
#include "textflag.h"
#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x2024 // @ MOVDDUP 32(SP), X0 /*XMM0, 32[RSP]*/
#define X_PTR SI
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1
// func ScalUnitaryTo(dst []float64, alpha float64, x []float64)
// This function assumes len(dst) >= len(x).
TEXT ·ScalUnitaryTo(SB), NOSPLIT, $0
	// Register roles: X_PTR=&x, DST_PTR=&dst, IDX=element index,
	// LEN=unrolled-loop counter, TAIL=remainder count, ALPHA/ALPHA_2=alpha broadcast.
	MOVQ x_base+32(FP), X_PTR    // X_PTR = &x
	MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst
	MOVDDUP_ALPHA                // ALPHA = { alpha, alpha }; reads alpha at 32(RSP) (= alpha+24(FP), frame size 0)
	MOVQ x_len+40(FP), LEN       // LEN = len(x); caller guarantees len(dst) >= len(x)
	CMPQ LEN, $0
	JE   end                     // if LEN == 0 { return }

	XORQ IDX, IDX                // IDX = 0
	MOVQ LEN, TAIL
	ANDQ $7, TAIL                // TAIL = LEN % 8
	SHRQ $3, LEN                 // LEN = floor( LEN / 8 )
	JZ   tail_start              // if LEN == 0 { goto tail_start }

	MOVUPS ALPHA, ALPHA_2        // ALPHA_2 = ALPHA; second copy breaks dependency chains for pipelining

loop: // do { // dst[i] = alpha * x[i] unrolled 8x (four 2-wide SSE lanes per iteration).
	MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]  (unaligned loads: x has no 16-byte guarantee)
	MOVUPS 16(X_PTR)(IDX*8), X3
	MOVUPS 32(X_PTR)(IDX*8), X4
	MOVUPS 48(X_PTR)(IDX*8), X5

	MULPD ALPHA, X2 // X_i *= ALPHA
	MULPD ALPHA_2, X3
	MULPD ALPHA, X4
	MULPD ALPHA_2, X5

	MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i
	MOVUPS X3, 16(DST_PTR)(IDX*8)
	MOVUPS X4, 32(DST_PTR)(IDX*8)
	MOVUPS X5, 48(DST_PTR)(IDX*8)

	ADDQ $8, IDX // i += 8
	DECQ LEN
	JNZ  loop // } while --LEN > 0

	CMPQ TAIL, $0
	JE   end // if TAIL == 0 { return }

tail_start: // Handle the LEN%8 remainder; reset loop counters.
	MOVQ TAIL, LEN // Loop counter: LEN = TAIL
	SHRQ $1, LEN   // LEN = floor( TAIL / 2 )
	JZ   tail_one  // if LEN == 0 { goto tail_one }

tail_two: // do { // process remaining pairs two at a time.
	MOVUPS (X_PTR)(IDX*8), X2   // X_i = x[i]
	MULPD  ALPHA, X2            // X_i *= ALPHA
	MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i
	ADDQ   $2, IDX              // i += 2
	DECQ   LEN
	JNZ    tail_two             // } while --LEN > 0

	ANDQ $1, TAIL
	JZ   end // if TAIL == 0 { return }

tail_one: // Final odd element, scalar.
	MOVSD (X_PTR)(IDX*8), X2   // X_i = x[i]
	MULSD ALPHA, X2            // X_i *= ALPHA
	MOVSD X2, (DST_PTR)(IDX*8) // dst[i] = X_i

end:
	RET

View File

@@ -0,0 +1,172 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !noasm,!appengine,!safe

package f64

// This file declares the Go signatures for routines implemented in the
// accompanying amd64 assembly files. The indented loop in each doc comment
// is the reference (pure Go) semantics of the declared routine.

// L1Norm is
//  for _, v := range x {
//   sum += math.Abs(v)
//  }
// return sum
func L1Norm(x []float64) (sum float64)

// L1NormInc is
//  for i := 0; i < n*incX; i += incX {
//   sum += math.Abs(x[i])
//  }
// return sum
func L1NormInc(x []float64, n, incX int) (sum float64)

// AddConst is
//  for i := range x {
//   x[i] += alpha
//  }
func AddConst(alpha float64, x []float64)

// Add is
//  for i, v := range s {
//   dst[i] += v
//  }
func Add(dst, s []float64)

// AxpyUnitary is
//  for i, v := range x {
//   y[i] += alpha * v
//  }
func AxpyUnitary(alpha float64, x, y []float64)

// AxpyUnitaryTo is
//  for i, v := range x {
//   dst[i] = alpha*v + y[i]
//  }
func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64)

// AxpyInc is
//  for i := 0; i < int(n); i++ {
//   y[iy] += alpha * x[ix]
//   ix += incX
//   iy += incY
//  }
func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr)

// AxpyIncTo is
//  for i := 0; i < int(n); i++ {
//   dst[idst] = alpha*x[ix] + y[iy]
//   ix += incX
//   iy += incY
//   idst += incDst
//  }
func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr)

// CumSum is
//  if len(s) == 0 {
//   return dst
//  }
//  dst[0] = s[0]
//  for i, v := range s[1:] {
//   dst[i+1] = dst[i] + v
//  }
//  return dst
func CumSum(dst, s []float64) []float64

// CumProd is
//  if len(s) == 0 {
//   return dst
//  }
//  dst[0] = s[0]
//  for i, v := range s[1:] {
//   dst[i+1] = dst[i] * v
//  }
//  return dst
func CumProd(dst, s []float64) []float64

// Div is
//  for i, v := range s {
//   dst[i] /= v
//  }
func Div(dst, s []float64)

// DivTo is
//  for i, v := range s {
//   dst[i] = v / t[i]
//  }
//  return dst
//
// NOTE(review): the doc comment names the inputs s/t while the signature
// names them x/y — presumably x corresponds to s and y to t; confirm
// against the assembly implementation.
func DivTo(dst, x, y []float64) []float64

// DotUnitary is
//  for i, v := range x {
//   sum += y[i] * v
//  }
// return sum
func DotUnitary(x, y []float64) (sum float64)

// DotInc is
//  for i := 0; i < int(n); i++ {
//   sum += y[iy] * x[ix]
//   ix += incX
//   iy += incY
//  }
// return sum
func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64)

// L1Dist is
//  var norm float64
//  for i, v := range s {
//   norm += math.Abs(t[i] - v)
//  }
//  return norm
func L1Dist(s, t []float64) float64

// LinfDist is
//  var norm float64
//  if len(s) == 0 {
//   return 0
//  }
//  norm = math.Abs(t[0] - s[0])
//  for i, v := range s[1:] {
//   absDiff := math.Abs(t[i+1] - v)
//   if absDiff > norm || math.IsNaN(norm) {
//    norm = absDiff
//   }
//  }
//  return norm
func LinfDist(s, t []float64) float64

// ScalUnitary is
//  for i := range x {
//   x[i] *= alpha
//  }
func ScalUnitary(alpha float64, x []float64)

// ScalUnitaryTo is
//  for i, v := range x {
//   dst[i] = alpha * v
//  }
func ScalUnitaryTo(dst []float64, alpha float64, x []float64)

// ScalInc is
//  var ix uintptr
//  for i := 0; i < int(n); i++ {
//   x[ix] *= alpha
//   ix += incX
//  }
func ScalInc(alpha float64, x []float64, n, incX uintptr)

// ScalIncTo is
//  var idst, ix uintptr
//  for i := 0; i < int(n); i++ {
//   dst[idst] = alpha * x[ix]
//   ix += incX
//   idst += incDst
//  }
func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr)

// Sum is
//  var sum float64
//  for i := range x {
//   sum += x[i]
//  }
func Sum(x []float64) float64

View File

@@ -0,0 +1,170 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 noasm appengine safe
package f64
import "math"
// L1Norm is
//  for _, v := range x {
//   sum += math.Abs(v)
//  }
// return sum
func L1Norm(x []float64) (sum float64) {
	for i := range x {
		sum += math.Abs(x[i])
	}
	return sum
}
// L1NormInc is
//  for i := 0; i < n*incX; i += incX {
//   sum += math.Abs(x[i])
//  }
// return sum
func L1NormInc(x []float64, n, incX int) (sum float64) {
	// Visit n strided elements; the n*incX bound matches the reference loop.
	end := n * incX
	for ix := 0; ix < end; ix += incX {
		sum += math.Abs(x[ix])
	}
	return sum
}
// Add is
//  for i, v := range s {
//   dst[i] += v
//  }
func Add(dst, s []float64) {
	// Element-wise accumulate s into dst; iteration bounded by len(s).
	for i := range s {
		dst[i] += s[i]
	}
}
// AddConst is
//  for i := range x {
//   x[i] += alpha
//  }
func AddConst(alpha float64, x []float64) {
	// Shift every element of x by alpha, in place.
	for i, v := range x {
		x[i] = v + alpha
	}
}
// CumSum is
//  if len(s) == 0 {
//   return dst
//  }
//  dst[0] = s[0]
//  for i, v := range s[1:] {
//   dst[i+1] = dst[i] + v
//  }
//  return dst
func CumSum(dst, s []float64) []float64 {
	if len(s) == 0 {
		return dst
	}
	// Running prefix sum; each element reads the previously written dst
	// value, matching the reference loop's access order exactly.
	dst[0] = s[0]
	for i := 1; i < len(s); i++ {
		dst[i] = dst[i-1] + s[i]
	}
	return dst
}
// CumProd is
//  if len(s) == 0 {
//   return dst
//  }
//  dst[0] = s[0]
//  for i, v := range s[1:] {
//   dst[i+1] = dst[i] * v
//  }
//  return dst
func CumProd(dst, s []float64) []float64 {
	if len(s) == 0 {
		return dst
	}
	// Running prefix product; each element reads the previously written dst
	// value, matching the reference loop's access order exactly.
	dst[0] = s[0]
	for i := 1; i < len(s); i++ {
		dst[i] = dst[i-1] * s[i]
	}
	return dst
}
// Div is
//  for i, v := range s {
//   dst[i] /= v
//  }
func Div(dst, s []float64) {
	// Element-wise divide dst by s, in place; bounded by len(s).
	for i := range s {
		dst[i] /= s[i]
	}
}
// DivTo is
//  for i, v := range s {
//   dst[i] = v / t[i]
//  }
//  return dst
func DivTo(dst, s, t []float64) []float64 {
	// dst[i] = s[i] / t[i] element-wise; bounded by len(s).
	for i := range s {
		dst[i] = s[i] / t[i]
	}
	return dst
}
// L1Dist is
//  var norm float64
//  for i, v := range s {
//   norm += math.Abs(t[i] - v)
//  }
//  return norm
func L1Dist(s, t []float64) float64 {
	// Sum of absolute element-wise differences; bounded by len(s).
	var norm float64
	for i := range s {
		norm += math.Abs(t[i] - s[i])
	}
	return norm
}
// LinfDist is
//  var norm float64
//  if len(s) == 0 {
//   return 0
//  }
//  norm = math.Abs(t[0] - s[0])
//  for i, v := range s[1:] {
//   absDiff := math.Abs(t[i+1] - v)
//   if absDiff > norm || math.IsNaN(norm) {
//    norm = absDiff
//   }
//  }
//  return norm
func LinfDist(s, t []float64) float64 {
	if len(s) == 0 {
		return 0
	}
	// Maximum absolute element-wise difference. The IsNaN check lets a NaN
	// running maximum be replaced by a later finite difference, as in the
	// reference loop.
	norm := math.Abs(t[0] - s[0])
	for i := 1; i < len(s); i++ {
		absDiff := math.Abs(t[i] - s[i])
		if absDiff > norm || math.IsNaN(norm) {
			norm = absDiff
		}
	}
	return norm
}
// Sum is
//  var sum float64
//  for i := range x {
//   sum += x[i]
//  }
func Sum(x []float64) float64 {
	// Left-to-right accumulation, preserving the reference loop's
	// floating-point summation order.
	var total float64
	for i := range x {
		total += x[i]
	}
	return total
}

100
vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,100 @@
// Copyright ©2018 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
#define X_PTR SI
#define IDX AX
#define LEN CX
#define TAIL BX
#define SUM X0
#define SUM_1 X1
#define SUM_2 X2
#define SUM_3 X3
// func Sum(x []float64) float64
TEXT ·Sum(SB), NOSPLIT, $0
	// Register roles: X_PTR(SI)=&x, IDX(AX)=element index, LEN(CX)=loop
	// counter, TAIL(BX)=alignment then remainder mask, SUM..SUM_3=partial sums.
	MOVQ x_base+0(FP), X_PTR // X_PTR = &x
	MOVQ x_len+8(FP), LEN    // LEN = len(x)
	XORQ IDX, IDX            // i = 0
	PXOR SUM, SUM            // p_sum_i = 0
	CMPQ LEN, $0             // if LEN == 0 { return 0 }
	JE   sum_end

	PXOR SUM_1, SUM_1 // Four independent accumulators hide ADDPD latency.
	PXOR SUM_2, SUM_2
	PXOR SUM_3, SUM_3

	MOVQ X_PTR, TAIL // Check memory alignment
	ANDQ $15, TAIL   // TAIL = &x % 16; for 8-aligned float64 data this is 0 or 8
	JZ   no_trim     // if TAIL == 0 { goto no_trim }

	// Align on 16-byte boundary by consuming one scalar element.
	ADDSD (X_PTR), X0 // X0 += x[0]
	INCQ  IDX         // i++
	DECQ  LEN         // LEN--
	DECQ  TAIL        // TAIL--
	JZ    sum_end     // if TAIL == 0 { return }
	                  // NOTE(review): TAIL is 8 before the DECQ for 8-aligned
	                  // float64 data, so this branch looks unreachable — confirm.

no_trim:
	MOVQ LEN, TAIL // TAIL = remaining elements; low bits select the tail steps below.
	SHRQ $4, LEN   // LEN = floor( n / 16 )
	JZ   sum_tail8 // if LEN == 0 { goto sum_tail8 }

sum_loop: // sum 16x wide do {
	ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2]
	ADDPD 16(SI)(AX*8), SUM_1
	ADDPD 32(SI)(AX*8), SUM_2
	ADDPD 48(SI)(AX*8), SUM_3
	ADDPD 64(SI)(AX*8), SUM
	ADDPD 80(SI)(AX*8), SUM_1
	ADDPD 96(SI)(AX*8), SUM_2
	ADDPD 112(SI)(AX*8), SUM_3
	ADDQ  $16, IDX // i += 16
	DECQ  LEN
	JNZ   sum_loop // } while --LEN > 0

sum_tail8: // Consume 8 elements if bit 3 of the remainder is set.
	TESTQ $8, TAIL
	JZ    sum_tail4

	ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2]
	ADDPD 16(SI)(AX*8), SUM_1
	ADDPD 32(SI)(AX*8), SUM_2
	ADDPD 48(SI)(AX*8), SUM_3
	ADDQ  $8, IDX

sum_tail4: // Fold four accumulators down to two, then consume 4 elements if needed.
	ADDPD SUM_3, SUM
	ADDPD SUM_2, SUM_1

	TESTQ $4, TAIL
	JZ    sum_tail2

	ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2]
	ADDPD 16(SI)(AX*8), SUM_1
	ADDQ  $4, IDX

sum_tail2: // Fold the remaining pair of accumulators; consume 2 elements if needed.
	ADDPD SUM_1, SUM

	TESTQ $2, TAIL
	JZ    sum_tail1

	ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2]
	ADDQ  $2, IDX

sum_tail1: // Horizontal reduction of the 2-wide accumulator, then last odd element.
	HADDPD SUM, SUM // sum_i[0] += sum_i[1]

	TESTQ $1, TAIL
	JZ    sum_end

	ADDSD (SI)(IDX*8), SUM

sum_end: // return sum
	MOVSD SUM, sum+24(FP)
	RET