diff --git a/blas/blas32/blas32.go b/blas/blas32/blas32.go index 73e53344..ddfe357d 100644 --- a/blas/blas32/blas32.go +++ b/blas/blas32/blas32.go @@ -105,7 +105,9 @@ const ( ) // Dot computes the dot product of the two vectors: -// \sum_i x[i]*y[i]. +// +// \sum_i x[i]*y[i]. +// // Dot will panic if the lengths of x and y do not match. func Dot(x, y Vector) float32 { if x.N != y.N { @@ -115,7 +117,9 @@ func Dot(x, y Vector) float32 { } // DDot computes the dot product of the two vectors: -// \sum_i x[i]*y[i]. +// +// \sum_i x[i]*y[i]. +// // DDot will panic if the lengths of x and y do not match. func DDot(x, y Vector) float64 { if x.N != y.N { @@ -125,7 +129,9 @@ func DDot(x, y Vector) float64 { } // SDDot computes the dot product of the two vectors adding a constant: -// alpha + \sum_i x[i]*y[i]. +// +// alpha + \sum_i x[i]*y[i]. +// // SDDot will panic if the lengths of x and y do not match. func SDDot(alpha float32, x, y Vector) float32 { if x.N != y.N { @@ -135,7 +141,8 @@ func SDDot(alpha float32, x, y Vector) float32 { } // Nrm2 computes the Euclidean norm of the vector x: -// sqrt(\sum_i x[i]*x[i]). +// +// sqrt(\sum_i x[i]*x[i]). // // Nrm2 will panic if the vector increment is negative. func Nrm2(x Vector) float32 { @@ -146,7 +153,8 @@ func Nrm2(x Vector) float32 { } // Asum computes the sum of the absolute values of the elements of x: -// \sum_i |x[i]|. +// +// \sum_i |x[i]|. // // Asum will panic if the vector increment is negative. func Asum(x Vector) float32 { @@ -169,7 +177,9 @@ func Iamax(x Vector) int { } // Swap exchanges the elements of the two vectors: -// x[i], y[i] = y[i], x[i] for all i. +// +// x[i], y[i] = y[i], x[i] for all i. +// // Swap will panic if the lengths of x and y do not match. func Swap(x, y Vector) { if x.N != y.N { @@ -179,7 +189,9 @@ func Swap(x, y Vector) { } // Copy copies the elements of x into the elements of y: -// y[i] = x[i] for all i. +// +// y[i] = x[i] for all i. +// // Copy will panic if the lengths of x and y do not match. func Copy(x, y Vector) { if x.N != y.N { @@ -189,7 +201,9 @@ func Copy(x, y Vector) { } // Axpy adds x scaled by alpha to y: -// y[i] += alpha*x[i] for all i. +// +// y[i] += alpha*x[i] for all i. +// // Axpy will panic if the lengths of x and y do not match. func Axpy(alpha float32, x, y Vector) { if x.N != y.N { @@ -199,17 +213,22 @@ func Axpy(alpha float32, x, y Vector) { } // Rotg computes the parameters of a Givens plane rotation so that -// ⎡ c s⎤ ⎡a⎤ ⎡r⎤ -// ⎣-s c⎦ * ⎣b⎦ = ⎣0⎦ +// +// ⎡ c s⎤ ⎡a⎤ ⎡r⎤ +// ⎣-s c⎦ * ⎣b⎦ = ⎣0⎦ +// // where a and b are the Cartesian coordinates of a given point. // c, s, and r are defined as -// r = ±Sqrt(a^2 + b^2), -// c = a/r, the cosine of the rotation angle, -// s = a/r, the sine of the rotation angle, +// +// r = ±Sqrt(a^2 + b^2), +// c = a/r, the cosine of the rotation angle, +// s = a/r, the sine of the rotation angle, +// // and z is defined such that -// if |a| > |b|, z = s, -// otherwise if c != 0, z = 1/c, -// otherwise z = 1. +// +// if |a| > |b|, z = s, +// otherwise if c != 0, z = 1/c, +// otherwise z = 1. func Rotg(a, b float32) (c, s, r, z float32) { return blas32.Srotg(a, b) } @@ -223,8 +242,9 @@ func Rotmg(d1, d2, b1, b2 float32) (p blas.SrotmParams, rd1, rd2, rb1 float32) { // Rot applies a plane transformation to n points represented by the vectors x // and y: -// x[i] = c*x[i] + s*y[i], -// y[i] = -s*x[i] + c*y[i], for all i. +// +// x[i] = c*x[i] + s*y[i], +// y[i] = -s*x[i] + c*y[i], for all i. 
func Rot(n int, x, y Vector, c, s float32) { blas32.Srot(n, x.Data, x.Inc, y.Data, y.Inc, c, s) } @@ -236,7 +256,8 @@ func Rotm(n int, x, y Vector, p blas.SrotmParams) { } // Scal scales the vector x by alpha: -// x[i] *= alpha for all i. +// +// x[i] *= alpha for all i. // // Scal will panic if the vector increment is negative. func Scal(alpha float32, x Vector) { @@ -249,48 +270,60 @@ func Scal(alpha float32, x Vector) { // Level 2 // Gemv computes -// y = alpha * A * x + beta * y if t == blas.NoTrans, -// y = alpha * Aᵀ * x + beta * y if t == blas.Trans or blas.ConjTrans, +// +// y = alpha * A * x + beta * y if t == blas.NoTrans, +// y = alpha * Aᵀ * x + beta * y if t == blas.Trans or blas.ConjTrans, +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. func Gemv(t blas.Transpose, alpha float32, a General, x Vector, beta float32, y Vector) { blas32.Sgemv(t, a.Rows, a.Cols, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) } // Gbmv computes -// y = alpha * A * x + beta * y if t == blas.NoTrans, -// y = alpha * Aᵀ * x + beta * y if t == blas.Trans or blas.ConjTrans, +// +// y = alpha * A * x + beta * y if t == blas.NoTrans, +// y = alpha * Aᵀ * x + beta * y if t == blas.Trans or blas.ConjTrans, +// // where A is an m×n band matrix, x and y are vectors, and alpha and beta are scalars. func Gbmv(t blas.Transpose, alpha float32, a Band, x Vector, beta float32, y Vector) { blas32.Sgbmv(t, a.Rows, a.Cols, a.KL, a.KU, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) } // Trmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular matrix, and x is a vector. func Trmv(t blas.Transpose, a Triangular, x Vector) { blas32.Strmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) } // Tbmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular band matrix, and x is a vector. func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { blas32.Stbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) } // Tpmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular matrix in packed format, and x is a vector. func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { blas32.Stpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) } // Trsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular matrix, and x and b are vectors. // // At entry to the function, x contains the values of b, and the result is @@ -303,8 +336,10 @@ func Trsv(t blas.Transpose, a Triangular, x Vector) { } // Tbsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular band matrix, and x and b are vectors. 
// // At entry to the function, x contains the values of b, and the result is @@ -317,8 +352,10 @@ func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { } // Tpsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular matrix in packed format, and x and b are // vectors. // @@ -332,7 +369,9 @@ func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { } // Symv computes -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n symmetric matrix, x and y are vectors, and alpha and // beta are scalars. func Symv(alpha float32, a Symmetric, x Vector, beta float32, y Vector) { @@ -340,7 +379,9 @@ func Symv(alpha float32, a Symmetric, x Vector, beta float32, y Vector) { } // Sbmv performs -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n symmetric band matrix, x and y are vectors, and alpha // and beta are scalars. func Sbmv(alpha float32, a SymmetricBand, x Vector, beta float32, y Vector) { @@ -348,7 +389,9 @@ func Sbmv(alpha float32, a SymmetricBand, x Vector, beta float32, y Vector) { } // Spmv performs -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n symmetric matrix in packed format, x and y are vectors, // and alpha and beta are scalars. func Spmv(alpha float32, a SymmetricPacked, x Vector, beta float32, y Vector) { @@ -356,21 +399,27 @@ func Spmv(alpha float32, a SymmetricPacked, x Vector, beta float32, y Vector) { } // Ger performs a rank-1 update -// A += alpha * x * yᵀ, +// +// A += alpha * x * yᵀ, +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Ger(alpha float32, x, y Vector, a General) { blas32.Sger(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Syr performs a rank-1 update -// A += alpha * x * xᵀ, +// +// A += alpha * x * xᵀ, +// // where A is an n×n symmetric matrix, x is a vector, and alpha is a scalar. func Syr(alpha float32, x Vector, a Symmetric) { blas32.Ssyr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) } // Spr performs the rank-1 update -// A += alpha * x * xᵀ, +// +// A += alpha * x * xᵀ, +// // where A is an n×n symmetric matrix in packed format, x is a vector, and // alpha is a scalar. func Spr(alpha float32, x Vector, a SymmetricPacked) { @@ -378,14 +427,18 @@ func Spr(alpha float32, x Vector, a SymmetricPacked) { } // Syr2 performs a rank-2 update -// A += alpha * x * yᵀ + alpha * y * xᵀ, +// +// A += alpha * x * yᵀ + alpha * y * xᵀ, +// // where A is a symmetric n×n matrix, x and y are vectors, and alpha is a scalar. func Syr2(alpha float32, x, y Vector, a Symmetric) { blas32.Ssyr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Spr2 performs a rank-2 update -// A += alpha * x * yᵀ + alpha * y * xᵀ, +// +// A += alpha * x * yᵀ + alpha * y * xᵀ, +// // where A is an n×n symmetric matrix in packed format, x and y are vectors, // and alpha is a scalar. func Spr2(alpha float32, x, y Vector, a SymmetricPacked) { @@ -395,7 +448,9 @@ func Spr2(alpha float32, x, y Vector, a SymmetricPacked) { // Level 3 // Gemm computes -// C = alpha * A * B + beta * C, +// +// C = alpha * A * B + beta * C, +// // where A, B, and C are dense matrices, and alpha and beta are scalars. // tA and tB specify whether A or B are transposed. 
func Gemm(tA, tB blas.Transpose, alpha float32, a, b General, beta float32, c General) { @@ -414,8 +469,10 @@ func Gemm(tA, tB blas.Transpose, alpha float32, a, b General, beta float32, c Ge } // Symm performs -// C = alpha * A * B + beta * C if s == blas.Left, -// C = alpha * B * A + beta * C if s == blas.Right, +// +// C = alpha * A * B + beta * C if s == blas.Left, +// C = alpha * B * A + beta * C if s == blas.Right, +// // where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and // alpha is a scalar. func Symm(s blas.Side, alpha float32, a Symmetric, b General, beta float32, c General) { @@ -429,8 +486,10 @@ func Symm(s blas.Side, alpha float32, a Symmetric, b General, beta float32, c Ge } // Syrk performs a symmetric rank-k update -// C = alpha * A * Aᵀ + beta * C if t == blas.NoTrans, -// C = alpha * Aᵀ * A + beta * C if t == blas.Trans or blas.ConjTrans, +// +// C = alpha * A * Aᵀ + beta * C if t == blas.NoTrans, +// C = alpha * Aᵀ * A + beta * C if t == blas.Trans or blas.ConjTrans, +// // where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans and // a k×n matrix otherwise, and alpha and beta are scalars. func Syrk(t blas.Transpose, alpha float32, a General, beta float32, c Symmetric) { @@ -444,8 +503,10 @@ func Syrk(t blas.Transpose, alpha float32, a General, beta float32, c Symmetric) } // Syr2k performs a symmetric rank-2k update -// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if t == blas.NoTrans, -// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if t == blas.Trans or blas.ConjTrans, +// +// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if t == blas.NoTrans, +// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if t == blas.Trans or blas.ConjTrans, +// // where C is an n×n symmetric matrix, A and B are n×k matrices if t == NoTrans // and k×n matrices otherwise, and alpha and beta are scalars. func Syr2k(t blas.Transpose, alpha float32, a, b General, beta float32, c Symmetric) { @@ -459,10 +520,12 @@ func Syr2k(t blas.Transpose, alpha float32, a, b General, beta float32, c Symmet } // Trmm performs -// B = alpha * A * B if tA == blas.NoTrans and s == blas.Left, -// B = alpha * Aᵀ * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, -// B = alpha * B * A if tA == blas.NoTrans and s == blas.Right, -// B = alpha * B * Aᵀ if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// +// B = alpha * A * B if tA == blas.NoTrans and s == blas.Left, +// B = alpha * Aᵀ * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// B = alpha * B * A if tA == blas.NoTrans and s == blas.Right, +// B = alpha * B * Aᵀ if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// // where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is // a scalar. 
func Trmm(s blas.Side, tA blas.Transpose, alpha float32, a Triangular, b General) { @@ -470,10 +533,12 @@ func Trmm(s blas.Side, tA blas.Transpose, alpha float32, a Triangular, b General } // Trsm solves -// A * X = alpha * B if tA == blas.NoTrans and s == blas.Left, -// Aᵀ * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, -// X * A = alpha * B if tA == blas.NoTrans and s == blas.Right, -// X * Aᵀ = alpha * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// +// A * X = alpha * B if tA == blas.NoTrans and s == blas.Left, +// Aᵀ * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// X * A = alpha * B if tA == blas.NoTrans and s == blas.Right, +// X * Aᵀ = alpha * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// // where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and // alpha is a scalar. // diff --git a/blas/blas64/blas64.go b/blas/blas64/blas64.go index 5871321e..c336dc89 100644 --- a/blas/blas64/blas64.go +++ b/blas/blas64/blas64.go @@ -105,7 +105,9 @@ const ( ) // Dot computes the dot product of the two vectors: -// \sum_i x[i]*y[i]. +// +// \sum_i x[i]*y[i]. +// // Dot will panic if the lengths of x and y do not match. func Dot(x, y Vector) float64 { if x.N != y.N { @@ -115,7 +117,8 @@ func Dot(x, y Vector) float64 { } // Nrm2 computes the Euclidean norm of the vector x: -// sqrt(\sum_i x[i]*x[i]). +// +// sqrt(\sum_i x[i]*x[i]). // // Nrm2 will panic if the vector increment is negative. func Nrm2(x Vector) float64 { @@ -126,7 +129,8 @@ func Nrm2(x Vector) float64 { } // Asum computes the sum of the absolute values of the elements of x: -// \sum_i |x[i]|. +// +// \sum_i |x[i]|. // // Asum will panic if the vector increment is negative. func Asum(x Vector) float64 { @@ -149,7 +153,9 @@ func Iamax(x Vector) int { } // Swap exchanges the elements of the two vectors: -// x[i], y[i] = y[i], x[i] for all i. +// +// x[i], y[i] = y[i], x[i] for all i. +// // Swap will panic if the lengths of x and y do not match. func Swap(x, y Vector) { if x.N != y.N { @@ -159,7 +165,9 @@ func Swap(x, y Vector) { } // Copy copies the elements of x into the elements of y: -// y[i] = x[i] for all i. +// +// y[i] = x[i] for all i. +// // Copy will panic if the lengths of x and y do not match. func Copy(x, y Vector) { if x.N != y.N { @@ -169,7 +177,9 @@ func Copy(x, y Vector) { } // Axpy adds x scaled by alpha to y: -// y[i] += alpha*x[i] for all i. +// +// y[i] += alpha*x[i] for all i. +// // Axpy will panic if the lengths of x and y do not match. func Axpy(alpha float64, x, y Vector) { if x.N != y.N { @@ -179,17 +189,22 @@ func Axpy(alpha float64, x, y Vector) { } // Rotg computes the parameters of a Givens plane rotation so that -// ⎡ c s⎤ ⎡a⎤ ⎡r⎤ -// ⎣-s c⎦ * ⎣b⎦ = ⎣0⎦ +// +// ⎡ c s⎤ ⎡a⎤ ⎡r⎤ +// ⎣-s c⎦ * ⎣b⎦ = ⎣0⎦ +// // where a and b are the Cartesian coordinates of a given point. // c, s, and r are defined as -// r = ±Sqrt(a^2 + b^2), -// c = a/r, the cosine of the rotation angle, -// s = a/r, the sine of the rotation angle, +// +// r = ±Sqrt(a^2 + b^2), +// c = a/r, the cosine of the rotation angle, +// s = a/r, the sine of the rotation angle, +// // and z is defined such that -// if |a| > |b|, z = s, -// otherwise if c != 0, z = 1/c, -// otherwise z = 1. +// +// if |a| > |b|, z = s, +// otherwise if c != 0, z = 1/c, +// otherwise z = 1. 
func Rotg(a, b float64) (c, s, r, z float64) { return blas64.Drotg(a, b) } @@ -203,8 +218,9 @@ func Rotmg(d1, d2, b1, b2 float64) (p blas.DrotmParams, rd1, rd2, rb1 float64) { // Rot applies a plane transformation to n points represented by the vectors x // and y: -// x[i] = c*x[i] + s*y[i], -// y[i] = -s*x[i] + c*y[i], for all i. +// +// x[i] = c*x[i] + s*y[i], +// y[i] = -s*x[i] + c*y[i], for all i. func Rot(x, y Vector, c, s float64) { if x.N != y.N { panic(badLength) @@ -222,7 +238,8 @@ func Rotm(x, y Vector, p blas.DrotmParams) { } // Scal scales the vector x by alpha: -// x[i] *= alpha for all i. +// +// x[i] *= alpha for all i. // // Scal will panic if the vector increment is negative. func Scal(alpha float64, x Vector) { @@ -235,48 +252,60 @@ func Scal(alpha float64, x Vector) { // Level 2 // Gemv computes -// y = alpha * A * x + beta * y if t == blas.NoTrans, -// y = alpha * Aᵀ * x + beta * y if t == blas.Trans or blas.ConjTrans, +// +// y = alpha * A * x + beta * y if t == blas.NoTrans, +// y = alpha * Aᵀ * x + beta * y if t == blas.Trans or blas.ConjTrans, +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. func Gemv(t blas.Transpose, alpha float64, a General, x Vector, beta float64, y Vector) { blas64.Dgemv(t, a.Rows, a.Cols, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) } // Gbmv computes -// y = alpha * A * x + beta * y if t == blas.NoTrans, -// y = alpha * Aᵀ * x + beta * y if t == blas.Trans or blas.ConjTrans, +// +// y = alpha * A * x + beta * y if t == blas.NoTrans, +// y = alpha * Aᵀ * x + beta * y if t == blas.Trans or blas.ConjTrans, +// // where A is an m×n band matrix, x and y are vectors, and alpha and beta are scalars. func Gbmv(t blas.Transpose, alpha float64, a Band, x Vector, beta float64, y Vector) { blas64.Dgbmv(t, a.Rows, a.Cols, a.KL, a.KU, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) } // Trmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular matrix, and x is a vector. func Trmv(t blas.Transpose, a Triangular, x Vector) { blas64.Dtrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) } // Tbmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular band matrix, and x is a vector. func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { blas64.Dtbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) } // Tpmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular matrix in packed format, and x is a vector. func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { blas64.Dtpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) } // Trsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular matrix, and x and b are vectors. 
// // At entry to the function, x contains the values of b, and the result is @@ -289,8 +318,10 @@ func Trsv(t blas.Transpose, a Triangular, x Vector) { } // Tbsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular band matrix, and x and b are vectors. // // At entry to the function, x contains the values of b, and the result is @@ -303,8 +334,10 @@ func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { } // Tpsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans or blas.ConjTrans, +// // where A is an n×n triangular matrix in packed format, and x and b are // vectors. // @@ -318,7 +351,9 @@ func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { } // Symv computes -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n symmetric matrix, x and y are vectors, and alpha and // beta are scalars. func Symv(alpha float64, a Symmetric, x Vector, beta float64, y Vector) { @@ -326,7 +361,9 @@ func Symv(alpha float64, a Symmetric, x Vector, beta float64, y Vector) { } // Sbmv performs -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n symmetric band matrix, x and y are vectors, and alpha // and beta are scalars. func Sbmv(alpha float64, a SymmetricBand, x Vector, beta float64, y Vector) { @@ -334,7 +371,9 @@ func Sbmv(alpha float64, a SymmetricBand, x Vector, beta float64, y Vector) { } // Spmv performs -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n symmetric matrix in packed format, x and y are vectors, // and alpha and beta are scalars. func Spmv(alpha float64, a SymmetricPacked, x Vector, beta float64, y Vector) { @@ -342,21 +381,27 @@ func Spmv(alpha float64, a SymmetricPacked, x Vector, beta float64, y Vector) { } // Ger performs a rank-1 update -// A += alpha * x * yᵀ, +// +// A += alpha * x * yᵀ, +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Ger(alpha float64, x, y Vector, a General) { blas64.Dger(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Syr performs a rank-1 update -// A += alpha * x * xᵀ, +// +// A += alpha * x * xᵀ, +// // where A is an n×n symmetric matrix, x is a vector, and alpha is a scalar. func Syr(alpha float64, x Vector, a Symmetric) { blas64.Dsyr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) } // Spr performs the rank-1 update -// A += alpha * x * xᵀ, +// +// A += alpha * x * xᵀ, +// // where A is an n×n symmetric matrix in packed format, x is a vector, and // alpha is a scalar. func Spr(alpha float64, x Vector, a SymmetricPacked) { @@ -364,14 +409,18 @@ func Spr(alpha float64, x Vector, a SymmetricPacked) { } // Syr2 performs a rank-2 update -// A += alpha * x * yᵀ + alpha * y * xᵀ, +// +// A += alpha * x * yᵀ + alpha * y * xᵀ, +// // where A is a symmetric n×n matrix, x and y are vectors, and alpha is a scalar. func Syr2(alpha float64, x, y Vector, a Symmetric) { blas64.Dsyr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Spr2 performs a rank-2 update -// A += alpha * x * yᵀ + alpha * y * xᵀ, +// +// A += alpha * x * yᵀ + alpha * y * xᵀ, +// // where A is an n×n symmetric matrix in packed format, x and y are vectors, // and alpha is a scalar. 
func Spr2(alpha float64, x, y Vector, a SymmetricPacked) { @@ -381,7 +430,9 @@ func Spr2(alpha float64, x, y Vector, a SymmetricPacked) { // Level 3 // Gemm computes -// C = alpha * A * B + beta * C, +// +// C = alpha * A * B + beta * C, +// // where A, B, and C are dense matrices, and alpha and beta are scalars. // tA and tB specify whether A or B are transposed. func Gemm(tA, tB blas.Transpose, alpha float64, a, b General, beta float64, c General) { @@ -400,8 +451,10 @@ func Gemm(tA, tB blas.Transpose, alpha float64, a, b General, beta float64, c Ge } // Symm performs -// C = alpha * A * B + beta * C if s == blas.Left, -// C = alpha * B * A + beta * C if s == blas.Right, +// +// C = alpha * A * B + beta * C if s == blas.Left, +// C = alpha * B * A + beta * C if s == blas.Right, +// // where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and // alpha is a scalar. func Symm(s blas.Side, alpha float64, a Symmetric, b General, beta float64, c General) { @@ -415,8 +468,10 @@ func Symm(s blas.Side, alpha float64, a Symmetric, b General, beta float64, c Ge } // Syrk performs a symmetric rank-k update -// C = alpha * A * Aᵀ + beta * C if t == blas.NoTrans, -// C = alpha * Aᵀ * A + beta * C if t == blas.Trans or blas.ConjTrans, +// +// C = alpha * A * Aᵀ + beta * C if t == blas.NoTrans, +// C = alpha * Aᵀ * A + beta * C if t == blas.Trans or blas.ConjTrans, +// // where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans and // a k×n matrix otherwise, and alpha and beta are scalars. func Syrk(t blas.Transpose, alpha float64, a General, beta float64, c Symmetric) { @@ -430,8 +485,10 @@ func Syrk(t blas.Transpose, alpha float64, a General, beta float64, c Symmetric) } // Syr2k performs a symmetric rank-2k update -// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if t == blas.NoTrans, -// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if t == blas.Trans or blas.ConjTrans, +// +// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if t == blas.NoTrans, +// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if t == blas.Trans or blas.ConjTrans, +// // where C is an n×n symmetric matrix, A and B are n×k matrices if t == NoTrans // and k×n matrices otherwise, and alpha and beta are scalars. func Syr2k(t blas.Transpose, alpha float64, a, b General, beta float64, c Symmetric) { @@ -445,10 +502,12 @@ func Syr2k(t blas.Transpose, alpha float64, a, b General, beta float64, c Symmet } // Trmm performs -// B = alpha * A * B if tA == blas.NoTrans and s == blas.Left, -// B = alpha * Aᵀ * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, -// B = alpha * B * A if tA == blas.NoTrans and s == blas.Right, -// B = alpha * B * Aᵀ if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// +// B = alpha * A * B if tA == blas.NoTrans and s == blas.Left, +// B = alpha * Aᵀ * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// B = alpha * B * A if tA == blas.NoTrans and s == blas.Right, +// B = alpha * B * Aᵀ if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// // where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is // a scalar. 
func Trmm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General) { @@ -456,10 +515,12 @@ func Trmm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General } // Trsm solves -// A * X = alpha * B if tA == blas.NoTrans and s == blas.Left, -// Aᵀ * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, -// X * A = alpha * B if tA == blas.NoTrans and s == blas.Right, -// X * Aᵀ = alpha * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// +// A * X = alpha * B if tA == blas.NoTrans and s == blas.Left, +// Aᵀ * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// X * A = alpha * B if tA == blas.NoTrans and s == blas.Right, +// X * Aᵀ = alpha * B if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// // where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and // alpha is a scalar. // diff --git a/blas/cblas128/cblas128.go b/blas/cblas128/cblas128.go index 4b745843..82a6f22e 100644 --- a/blas/cblas128/cblas128.go +++ b/blas/cblas128/cblas128.go @@ -115,7 +115,9 @@ const ( // Dotu computes the dot product of the two vectors without // complex conjugation: -// xᵀ * y. +// +// xᵀ * y. +// // Dotu will panic if the lengths of x and y do not match. func Dotu(x, y Vector) complex128 { if x.N != y.N { @@ -126,7 +128,9 @@ func Dotu(x, y Vector) complex128 { // Dotc computes the dot product of the two vectors with // complex conjugation: -// xᴴ * y. +// +// xᴴ * y. +// // Dotc will panic if the lengths of x and y do not match. func Dotc(x, y Vector) complex128 { if x.N != y.N { @@ -136,7 +140,8 @@ func Dotc(x, y Vector) complex128 { } // Nrm2 computes the Euclidean norm of the vector x: -// sqrt(\sum_i x[i] * x[i]). +// +// sqrt(\sum_i x[i] * x[i]). // // Nrm2 will panic if the vector increment is negative. func Nrm2(x Vector) float64 { @@ -148,7 +153,8 @@ func Nrm2(x Vector) float64 { // Asum computes the sum of magnitudes of the real and imaginary parts of // elements of the vector x: -// \sum_i (|Re x[i]| + |Im x[i]|). +// +// \sum_i (|Re x[i]| + |Im x[i]|). // // Asum will panic if the vector increment is negative. func Asum(x Vector) float64 { @@ -173,7 +179,9 @@ func Iamax(x Vector) int { } // Swap exchanges the elements of two vectors: -// x[i], y[i] = y[i], x[i] for all i. +// +// x[i], y[i] = y[i], x[i] for all i. +// // Swap will panic if the lengths of x and y do not match. func Swap(x, y Vector) { if x.N != y.N { @@ -183,7 +191,9 @@ func Swap(x, y Vector) { } // Copy copies the elements of x into the elements of y: -// y[i] = x[i] for all i. +// +// y[i] = x[i] for all i. +// // Copy will panic if the lengths of x and y do not match. func Copy(x, y Vector) { if x.N != y.N { @@ -193,7 +203,9 @@ func Copy(x, y Vector) { } // Axpy computes -// y = alpha * x + y, +// +// y = alpha * x + y, +// // where x and y are vectors, and alpha is a scalar. // Axpy will panic if the lengths of x and y do not match. func Axpy(alpha complex128, x, y Vector) { @@ -204,7 +216,9 @@ func Axpy(alpha complex128, x, y Vector) { } // Scal computes -// x = alpha * x, +// +// x = alpha * x, +// // where x is a vector, and alpha is a scalar. // // Scal will panic if the vector increment is negative. @@ -216,7 +230,9 @@ func Scal(alpha complex128, x Vector) { } // Dscal computes -// x = alpha * x, +// +// x = alpha * x, +// // where x is a vector, and alpha is a real scalar. // // Dscal will panic if the vector increment is negative. 
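Aside (illustrative only, not part of this patch): the Level 1 doc comments reformatted above all describe the same Vector-based calling convention. A minimal sketch of how Dot and Axpy are invoked, assuming the gonum.org/v1/gonum/blas/blas64 import path and its default implementation; the data values are made up for the example.

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/blas/blas64"
    )

    func main() {
        x := blas64.Vector{N: 3, Inc: 1, Data: []float64{1, 2, 3}}
        y := blas64.Vector{N: 3, Inc: 1, Data: []float64{4, 5, 6}}

        // Dot: \sum_i x[i]*y[i] = 1*4 + 2*5 + 3*6 = 32.
        fmt.Println(blas64.Dot(x, y))

        // Axpy: y[i] += 2*x[i] for all i, so y.Data becomes [6 9 12].
        blas64.Axpy(2, x, y)
        fmt.Println(y.Data)
    }

As the reformatted comments state, Dot and Axpy panic if x.N and y.N differ.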
@@ -230,9 +246,11 @@ func Dscal(alpha float64, x Vector) { // Level 2 // Gemv computes -// y = alpha * A * x + beta * y if t == blas.NoTrans, -// y = alpha * Aᵀ * x + beta * y if t == blas.Trans, -// y = alpha * Aᴴ * x + beta * y if t == blas.ConjTrans, +// +// y = alpha * A * x + beta * y if t == blas.NoTrans, +// y = alpha * Aᵀ * x + beta * y if t == blas.Trans, +// y = alpha * Aᴴ * x + beta * y if t == blas.ConjTrans, +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are // scalars. func Gemv(t blas.Transpose, alpha complex128, a General, x Vector, beta complex128, y Vector) { @@ -240,9 +258,11 @@ func Gemv(t blas.Transpose, alpha complex128, a General, x Vector, beta complex1 } // Gbmv computes -// y = alpha * A * x + beta * y if t == blas.NoTrans, -// y = alpha * Aᵀ * x + beta * y if t == blas.Trans, -// y = alpha * Aᴴ * x + beta * y if t == blas.ConjTrans, +// +// y = alpha * A * x + beta * y if t == blas.NoTrans, +// y = alpha * Aᵀ * x + beta * y if t == blas.Trans, +// y = alpha * Aᴴ * x + beta * y if t == blas.ConjTrans, +// // where A is an m×n band matrix, x and y are vectors, and alpha and beta are // scalars. func Gbmv(t blas.Transpose, alpha complex128, a Band, x Vector, beta complex128, y Vector) { @@ -250,36 +270,44 @@ func Gbmv(t blas.Transpose, alpha complex128, a Band, x Vector, beta complex128, } // Trmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans, -// x = Aᴴ * x if t == blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans, +// x = Aᴴ * x if t == blas.ConjTrans, +// // where A is an n×n triangular matrix, and x is a vector. func Trmv(t blas.Transpose, a Triangular, x Vector) { cblas128.Ztrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) } // Tbmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans, -// x = Aᴴ * x if t == blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans, +// x = Aᴴ * x if t == blas.ConjTrans, +// // where A is an n×n triangular band matrix, and x is a vector. func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { cblas128.Ztbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) } // Tpmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans, -// x = Aᴴ * x if t == blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans, +// x = Aᴴ * x if t == blas.ConjTrans, +// // where A is an n×n triangular matrix in packed format, and x is a vector. func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { cblas128.Ztpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) } // Trsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans, -// Aᴴ * x = b if t == blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans, +// Aᴴ * x = b if t == blas.ConjTrans, +// // where A is an n×n triangular matrix and x is a vector. // // At entry to the function, x contains the values of b, and the result is @@ -292,9 +320,11 @@ func Trsv(t blas.Transpose, a Triangular, x Vector) { } // Tbsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans, -// Aᴴ * x = b if t == blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans, +// Aᴴ * x = b if t == blas.ConjTrans, +// // where A is an n×n triangular band matrix, and x is a vector. 
// // At entry to the function, x contains the values of b, and the result is @@ -307,9 +337,11 @@ func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { } // Tpsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans, -// Aᴴ * x = b if t == blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans, +// Aᴴ * x = b if t == blas.ConjTrans, +// // where A is an n×n triangular matrix in packed format and x is a vector. // // At entry to the function, x contains the values of b, and the result is @@ -322,7 +354,9 @@ func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { } // Hemv computes -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n Hermitian matrix, x and y are vectors, and alpha and // beta are scalars. func Hemv(alpha complex128, a Hermitian, x Vector, beta complex128, y Vector) { @@ -330,7 +364,9 @@ func Hemv(alpha complex128, a Hermitian, x Vector, beta complex128, y Vector) { } // Hbmv performs -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n Hermitian band matrix, x and y are vectors, and alpha // and beta are scalars. func Hbmv(alpha complex128, a HermitianBand, x Vector, beta complex128, y Vector) { @@ -338,7 +374,9 @@ func Hbmv(alpha complex128, a HermitianBand, x Vector, beta complex128, y Vector } // Hpmv performs -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n Hermitian matrix in packed format, x and y are vectors, // and alpha and beta are scalars. func Hpmv(alpha complex128, a HermitianPacked, x Vector, beta complex128, y Vector) { @@ -346,28 +384,36 @@ func Hpmv(alpha complex128, a HermitianPacked, x Vector, beta complex128, y Vect } // Geru performs a rank-1 update -// A += alpha * x * yᵀ, +// +// A += alpha * x * yᵀ, +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Geru(alpha complex128, x, y Vector, a General) { cblas128.Zgeru(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Gerc performs a rank-1 update -// A += alpha * x * yᴴ, +// +// A += alpha * x * yᴴ, +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Gerc(alpha complex128, x, y Vector, a General) { cblas128.Zgerc(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Her performs a rank-1 update -// A += alpha * x * yᵀ, +// +// A += alpha * x * yᵀ, +// // where A is an m×n Hermitian matrix, x and y are vectors, and alpha is a scalar. func Her(alpha float64, x Vector, a Hermitian) { cblas128.Zher(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) } // Hpr performs a rank-1 update -// A += alpha * x * xᴴ, +// +// A += alpha * x * xᴴ, +// // where A is an n×n Hermitian matrix in packed format, x is a vector, and // alpha is a scalar. func Hpr(alpha float64, x Vector, a HermitianPacked) { @@ -375,14 +421,18 @@ func Hpr(alpha float64, x Vector, a HermitianPacked) { } // Her2 performs a rank-2 update -// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ, +// +// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ, +// // where A is an n×n Hermitian matrix, x and y are vectors, and alpha is a scalar. 
func Her2(alpha complex128, x, y Vector, a Hermitian) { cblas128.Zher2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Hpr2 performs a rank-2 update -// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ, +// +// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ, +// // where A is an n×n Hermitian matrix in packed format, x and y are vectors, // and alpha is a scalar. func Hpr2(alpha complex128, x, y Vector, a HermitianPacked) { @@ -392,7 +442,9 @@ func Hpr2(alpha complex128, x, y Vector, a HermitianPacked) { // Level 3 // Gemm computes -// C = alpha * A * B + beta * C, +// +// C = alpha * A * B + beta * C, +// // where A, B, and C are dense matrices, and alpha and beta are scalars. // tA and tB specify whether A or B are transposed or conjugated. func Gemm(tA, tB blas.Transpose, alpha complex128, a, b General, beta complex128, c General) { @@ -411,8 +463,10 @@ func Gemm(tA, tB blas.Transpose, alpha complex128, a, b General, beta complex128 } // Symm performs -// C = alpha * A * B + beta * C if s == blas.Left, -// C = alpha * B * A + beta * C if s == blas.Right, +// +// C = alpha * A * B + beta * C if s == blas.Left, +// C = alpha * B * A + beta * C if s == blas.Right, +// // where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and // alpha and beta are scalars. func Symm(s blas.Side, alpha complex128, a Symmetric, b General, beta complex128, c General) { @@ -426,8 +480,10 @@ func Symm(s blas.Side, alpha complex128, a Symmetric, b General, beta complex128 } // Syrk performs a symmetric rank-k update -// C = alpha * A * Aᵀ + beta * C if t == blas.NoTrans, -// C = alpha * Aᵀ * A + beta * C if t == blas.Trans, +// +// C = alpha * A * Aᵀ + beta * C if t == blas.NoTrans, +// C = alpha * Aᵀ * A + beta * C if t == blas.Trans, +// // where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans // and a k×n matrix otherwise, and alpha and beta are scalars. func Syrk(t blas.Transpose, alpha complex128, a General, beta complex128, c Symmetric) { @@ -441,8 +497,10 @@ func Syrk(t blas.Transpose, alpha complex128, a General, beta complex128, c Symm } // Syr2k performs a symmetric rank-2k update -// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if t == blas.NoTrans, -// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if t == blas.Trans, +// +// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if t == blas.NoTrans, +// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if t == blas.Trans, +// // where C is an n×n symmetric matrix, A and B are n×k matrices if // t == blas.NoTrans and k×n otherwise, and alpha and beta are scalars. 
func Syr2k(t blas.Transpose, alpha complex128, a, b General, beta complex128, c Symmetric) { @@ -456,12 +514,14 @@ func Syr2k(t blas.Transpose, alpha complex128, a, b General, beta complex128, c } // Trmm performs -// B = alpha * A * B if tA == blas.NoTrans and s == blas.Left, -// B = alpha * Aᵀ * B if tA == blas.Trans and s == blas.Left, -// B = alpha * Aᴴ * B if tA == blas.ConjTrans and s == blas.Left, -// B = alpha * B * A if tA == blas.NoTrans and s == blas.Right, -// B = alpha * B * Aᵀ if tA == blas.Trans and s == blas.Right, -// B = alpha * B * Aᴴ if tA == blas.ConjTrans and s == blas.Right, +// +// B = alpha * A * B if tA == blas.NoTrans and s == blas.Left, +// B = alpha * Aᵀ * B if tA == blas.Trans and s == blas.Left, +// B = alpha * Aᴴ * B if tA == blas.ConjTrans and s == blas.Left, +// B = alpha * B * A if tA == blas.NoTrans and s == blas.Right, +// B = alpha * B * Aᵀ if tA == blas.Trans and s == blas.Right, +// B = alpha * B * Aᴴ if tA == blas.ConjTrans and s == blas.Right, +// // where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is // a scalar. func Trmm(s blas.Side, tA blas.Transpose, alpha complex128, a Triangular, b General) { @@ -469,12 +529,14 @@ func Trmm(s blas.Side, tA blas.Transpose, alpha complex128, a Triangular, b Gene } // Trsm solves -// A * X = alpha * B if tA == blas.NoTrans and s == blas.Left, -// Aᵀ * X = alpha * B if tA == blas.Trans and s == blas.Left, -// Aᴴ * X = alpha * B if tA == blas.ConjTrans and s == blas.Left, -// X * A = alpha * B if tA == blas.NoTrans and s == blas.Right, -// X * Aᵀ = alpha * B if tA == blas.Trans and s == blas.Right, -// X * Aᴴ = alpha * B if tA == blas.ConjTrans and s == blas.Right, +// +// A * X = alpha * B if tA == blas.NoTrans and s == blas.Left, +// Aᵀ * X = alpha * B if tA == blas.Trans and s == blas.Left, +// Aᴴ * X = alpha * B if tA == blas.ConjTrans and s == blas.Left, +// X * A = alpha * B if tA == blas.NoTrans and s == blas.Right, +// X * Aᵀ = alpha * B if tA == blas.Trans and s == blas.Right, +// X * Aᴴ = alpha * B if tA == blas.ConjTrans and s == blas.Right, +// // where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and // alpha is a scalar. // @@ -487,8 +549,10 @@ func Trsm(s blas.Side, tA blas.Transpose, alpha complex128, a Triangular, b Gene } // Hemm performs -// C = alpha * A * B + beta * C if s == blas.Left, -// C = alpha * B * A + beta * C if s == blas.Right, +// +// C = alpha * A * B + beta * C if s == blas.Left, +// C = alpha * B * A + beta * C if s == blas.Right, +// // where A is an n×n or m×m Hermitian matrix, B and C are m×n matrices, and // alpha and beta are scalars. func Hemm(s blas.Side, alpha complex128, a Hermitian, b General, beta complex128, c General) { @@ -502,8 +566,10 @@ func Hemm(s blas.Side, alpha complex128, a Hermitian, b General, beta complex128 } // Herk performs the Hermitian rank-k update -// C = alpha * A * Aᴴ + beta*C if t == blas.NoTrans, -// C = alpha * Aᴴ * A + beta*C if t == blas.ConjTrans, +// +// C = alpha * A * Aᴴ + beta*C if t == blas.NoTrans, +// C = alpha * Aᴴ * A + beta*C if t == blas.ConjTrans, +// // where C is an n×n Hermitian matrix, A is an n×k matrix if t == blas.NoTrans // and a k×n matrix otherwise, and alpha and beta are scalars. 
func Herk(t blas.Transpose, alpha float64, a General, beta float64, c Hermitian) { @@ -517,8 +583,10 @@ func Herk(t blas.Transpose, alpha float64, a General, beta float64, c Hermitian) } // Her2k performs the Hermitian rank-2k update -// C = alpha * A * Bᴴ + conj(alpha) * B * Aᴴ + beta * C if t == blas.NoTrans, -// C = alpha * Aᴴ * B + conj(alpha) * Bᴴ * A + beta * C if t == blas.ConjTrans, +// +// C = alpha * A * Bᴴ + conj(alpha) * B * Aᴴ + beta * C if t == blas.NoTrans, +// C = alpha * Aᴴ * B + conj(alpha) * Bᴴ * A + beta * C if t == blas.ConjTrans, +// // where C is an n×n Hermitian matrix, A and B are n×k matrices if t == NoTrans // and k×n matrices otherwise, and alpha and beta are scalars. func Her2k(t blas.Transpose, alpha complex128, a, b General, beta float64, c Hermitian) { diff --git a/blas/cblas64/cblas64.go b/blas/cblas64/cblas64.go index 29772f1b..f2528880 100644 --- a/blas/cblas64/cblas64.go +++ b/blas/cblas64/cblas64.go @@ -115,7 +115,9 @@ const ( // Dotu computes the dot product of the two vectors without // complex conjugation: -// xᵀ * y +// +// xᵀ * y +// // Dotu will panic if the lengths of x and y do not match. func Dotu(x, y Vector) complex64 { if x.N != y.N { @@ -126,7 +128,9 @@ func Dotu(x, y Vector) complex64 { // Dotc computes the dot product of the two vectors with // complex conjugation: -// xᴴ * y. +// +// xᴴ * y. +// // Dotc will panic if the lengths of x and y do not match. func Dotc(x, y Vector) complex64 { if x.N != y.N { @@ -136,7 +140,8 @@ func Dotc(x, y Vector) complex64 { } // Nrm2 computes the Euclidean norm of the vector x: -// sqrt(\sum_i x[i] * x[i]). +// +// sqrt(\sum_i x[i] * x[i]). // // Nrm2 will panic if the vector increment is negative. func Nrm2(x Vector) float32 { @@ -148,7 +153,8 @@ func Nrm2(x Vector) float32 { // Asum computes the sum of magnitudes of the real and imaginary parts of // elements of the vector x: -// \sum_i (|Re x[i]| + |Im x[i]|). +// +// \sum_i (|Re x[i]| + |Im x[i]|). // // Asum will panic if the vector increment is negative. func Asum(x Vector) float32 { @@ -173,7 +179,9 @@ func Iamax(x Vector) int { } // Swap exchanges the elements of two vectors: -// x[i], y[i] = y[i], x[i] for all i. +// +// x[i], y[i] = y[i], x[i] for all i. +// // Swap will panic if the lengths of x and y do not match. func Swap(x, y Vector) { if x.N != y.N { @@ -183,7 +191,9 @@ func Swap(x, y Vector) { } // Copy copies the elements of x into the elements of y: -// y[i] = x[i] for all i. +// +// y[i] = x[i] for all i. +// // Copy will panic if the lengths of x and y do not match. func Copy(x, y Vector) { if x.N != y.N { @@ -193,7 +203,9 @@ func Copy(x, y Vector) { } // Axpy computes -// y = alpha * x + y, +// +// y = alpha * x + y, +// // where x and y are vectors, and alpha is a scalar. // Axpy will panic if the lengths of x and y do not match. func Axpy(alpha complex64, x, y Vector) { @@ -204,7 +216,9 @@ func Axpy(alpha complex64, x, y Vector) { } // Scal computes -// x = alpha * x, +// +// x = alpha * x, +// // where x is a vector, and alpha is a scalar. // // Scal will panic if the vector increment is negative. @@ -216,7 +230,9 @@ func Scal(alpha complex64, x Vector) { } // Dscal computes -// x = alpha * x, +// +// x = alpha * x, +// // where x is a vector, and alpha is a real scalar. // // Dscal will panic if the vector increment is negative. 
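Aside (illustrative only, not part of this patch): a small sketch of the Level 2 convention documented in the complex hunks above, using cblas128.Gemv. It assumes the package's default complex128 implementation and row-major General storage with Stride equal to the number of columns; the matrix values are made up.

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/blas"
        "gonum.org/v1/gonum/blas/cblas128"
    )

    func main() {
        // A is the 2×2 dense matrix
        //   [ 1  2i]
        //   [-2i  3]
        // stored row-major with Stride = 2.
        a := cblas128.General{
            Rows: 2, Cols: 2, Stride: 2,
            Data: []complex128{1, 2i, -2i, 3},
        }
        x := cblas128.Vector{N: 2, Inc: 1, Data: []complex128{1, 1}}
        y := cblas128.Vector{N: 2, Inc: 1, Data: []complex128{0, 0}}

        // y = 1 * A * x + 0 * y, giving [(1+2i) (3-2i)].
        cblas128.Gemv(blas.NoTrans, 1, a, x, 0, y)
        fmt.Println(y.Data)
    }

With beta = 0 the existing contents of y do not contribute to the result; y only needs the right length and increment.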
@@ -230,9 +246,11 @@ func Dscal(alpha float32, x Vector) { // Level 2 // Gemv computes -// y = alpha * A * x + beta * y if t == blas.NoTrans, -// y = alpha * Aᵀ * x + beta * y if t == blas.Trans, -// y = alpha * Aᴴ * x + beta * y if t == blas.ConjTrans, +// +// y = alpha * A * x + beta * y if t == blas.NoTrans, +// y = alpha * Aᵀ * x + beta * y if t == blas.Trans, +// y = alpha * Aᴴ * x + beta * y if t == blas.ConjTrans, +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are // scalars. func Gemv(t blas.Transpose, alpha complex64, a General, x Vector, beta complex64, y Vector) { @@ -240,9 +258,11 @@ func Gemv(t blas.Transpose, alpha complex64, a General, x Vector, beta complex64 } // Gbmv computes -// y = alpha * A * x + beta * y if t == blas.NoTrans, -// y = alpha * Aᵀ * x + beta * y if t == blas.Trans, -// y = alpha * Aᴴ * x + beta * y if t == blas.ConjTrans, +// +// y = alpha * A * x + beta * y if t == blas.NoTrans, +// y = alpha * Aᵀ * x + beta * y if t == blas.Trans, +// y = alpha * Aᴴ * x + beta * y if t == blas.ConjTrans, +// // where A is an m×n band matrix, x and y are vectors, and alpha and beta are // scalars. func Gbmv(t blas.Transpose, alpha complex64, a Band, x Vector, beta complex64, y Vector) { @@ -250,36 +270,44 @@ func Gbmv(t blas.Transpose, alpha complex64, a Band, x Vector, beta complex64, y } // Trmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans, -// x = Aᴴ * x if t == blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans, +// x = Aᴴ * x if t == blas.ConjTrans, +// // where A is an n×n triangular matrix, and x is a vector. func Trmv(t blas.Transpose, a Triangular, x Vector) { cblas64.Ctrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) } // Tbmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans, -// x = Aᴴ * x if t == blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans, +// x = Aᴴ * x if t == blas.ConjTrans, +// // where A is an n×n triangular band matrix, and x is a vector. func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { cblas64.Ctbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) } // Tpmv computes -// x = A * x if t == blas.NoTrans, -// x = Aᵀ * x if t == blas.Trans, -// x = Aᴴ * x if t == blas.ConjTrans, +// +// x = A * x if t == blas.NoTrans, +// x = Aᵀ * x if t == blas.Trans, +// x = Aᴴ * x if t == blas.ConjTrans, +// // where A is an n×n triangular matrix in packed format, and x is a vector. func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { cblas64.Ctpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) } // Trsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans, -// Aᴴ * x = b if t == blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans, +// Aᴴ * x = b if t == blas.ConjTrans, +// // where A is an n×n triangular matrix and x is a vector. // // At entry to the function, x contains the values of b, and the result is @@ -292,9 +320,11 @@ func Trsv(t blas.Transpose, a Triangular, x Vector) { } // Tbsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans, -// Aᴴ * x = b if t == blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans, +// Aᴴ * x = b if t == blas.ConjTrans, +// // where A is an n×n triangular band matrix, and x is a vector. 
// // At entry to the function, x contains the values of b, and the result is @@ -307,9 +337,11 @@ func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { } // Tpsv solves -// A * x = b if t == blas.NoTrans, -// Aᵀ * x = b if t == blas.Trans, -// Aᴴ * x = b if t == blas.ConjTrans, +// +// A * x = b if t == blas.NoTrans, +// Aᵀ * x = b if t == blas.Trans, +// Aᴴ * x = b if t == blas.ConjTrans, +// // where A is an n×n triangular matrix in packed format and x is a vector. // // At entry to the function, x contains the values of b, and the result is @@ -322,7 +354,9 @@ func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { } // Hemv computes -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n Hermitian matrix, x and y are vectors, and alpha and // beta are scalars. func Hemv(alpha complex64, a Hermitian, x Vector, beta complex64, y Vector) { @@ -330,7 +364,9 @@ func Hemv(alpha complex64, a Hermitian, x Vector, beta complex64, y Vector) { } // Hbmv performs -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n Hermitian band matrix, x and y are vectors, and alpha // and beta are scalars. func Hbmv(alpha complex64, a HermitianBand, x Vector, beta complex64, y Vector) { @@ -338,7 +374,9 @@ func Hbmv(alpha complex64, a HermitianBand, x Vector, beta complex64, y Vector) } // Hpmv performs -// y = alpha * A * x + beta * y, +// +// y = alpha * A * x + beta * y, +// // where A is an n×n Hermitian matrix in packed format, x and y are vectors, // and alpha and beta are scalars. func Hpmv(alpha complex64, a HermitianPacked, x Vector, beta complex64, y Vector) { @@ -346,28 +384,36 @@ func Hpmv(alpha complex64, a HermitianPacked, x Vector, beta complex64, y Vector } // Geru performs a rank-1 update -// A += alpha * x * yᵀ, +// +// A += alpha * x * yᵀ, +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Geru(alpha complex64, x, y Vector, a General) { cblas64.Cgeru(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Gerc performs a rank-1 update -// A += alpha * x * yᴴ, +// +// A += alpha * x * yᴴ, +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Gerc(alpha complex64, x, y Vector, a General) { cblas64.Cgerc(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Her performs a rank-1 update -// A += alpha * x * yᵀ, +// +// A += alpha * x * yᵀ, +// // where A is an m×n Hermitian matrix, x and y are vectors, and alpha is a scalar. func Her(alpha float32, x Vector, a Hermitian) { cblas64.Cher(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) } // Hpr performs a rank-1 update -// A += alpha * x * xᴴ, +// +// A += alpha * x * xᴴ, +// // where A is an n×n Hermitian matrix in packed format, x is a vector, and // alpha is a scalar. func Hpr(alpha float32, x Vector, a HermitianPacked) { @@ -375,14 +421,18 @@ func Hpr(alpha float32, x Vector, a HermitianPacked) { } // Her2 performs a rank-2 update -// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ, +// +// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ, +// // where A is an n×n Hermitian matrix, x and y are vectors, and alpha is a scalar. 
func Her2(alpha complex64, x, y Vector, a Hermitian) { cblas64.Cher2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) } // Hpr2 performs a rank-2 update -// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ, +// +// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ, +// // where A is an n×n Hermitian matrix in packed format, x and y are vectors, // and alpha is a scalar. func Hpr2(alpha complex64, x, y Vector, a HermitianPacked) { @@ -392,7 +442,9 @@ func Hpr2(alpha complex64, x, y Vector, a HermitianPacked) { // Level 3 // Gemm computes -// C = alpha * A * B + beta * C, +// +// C = alpha * A * B + beta * C, +// // where A, B, and C are dense matrices, and alpha and beta are scalars. // tA and tB specify whether A or B are transposed or conjugated. func Gemm(tA, tB blas.Transpose, alpha complex64, a, b General, beta complex64, c General) { @@ -411,8 +463,10 @@ func Gemm(tA, tB blas.Transpose, alpha complex64, a, b General, beta complex64, } // Symm performs -// C = alpha * A * B + beta * C if s == blas.Left, -// C = alpha * B * A + beta * C if s == blas.Right, +// +// C = alpha * A * B + beta * C if s == blas.Left, +// C = alpha * B * A + beta * C if s == blas.Right, +// // where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and // alpha and beta are scalars. func Symm(s blas.Side, alpha complex64, a Symmetric, b General, beta complex64, c General) { @@ -426,8 +480,10 @@ func Symm(s blas.Side, alpha complex64, a Symmetric, b General, beta complex64, } // Syrk performs a symmetric rank-k update -// C = alpha * A * Aᵀ + beta * C if t == blas.NoTrans, -// C = alpha * Aᵀ * A + beta * C if t == blas.Trans, +// +// C = alpha * A * Aᵀ + beta * C if t == blas.NoTrans, +// C = alpha * Aᵀ * A + beta * C if t == blas.Trans, +// // where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans // and a k×n matrix otherwise, and alpha and beta are scalars. func Syrk(t blas.Transpose, alpha complex64, a General, beta complex64, c Symmetric) { @@ -441,8 +497,10 @@ func Syrk(t blas.Transpose, alpha complex64, a General, beta complex64, c Symmet } // Syr2k performs a symmetric rank-2k update -// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if t == blas.NoTrans, -// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if t == blas.Trans, +// +// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if t == blas.NoTrans, +// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if t == blas.Trans, +// // where C is an n×n symmetric matrix, A and B are n×k matrices if // t == blas.NoTrans and k×n otherwise, and alpha and beta are scalars. 
func Syr2k(t blas.Transpose, alpha complex64, a, b General, beta complex64, c Symmetric) { @@ -456,12 +514,14 @@ func Syr2k(t blas.Transpose, alpha complex64, a, b General, beta complex64, c Sy } // Trmm performs -// B = alpha * A * B if tA == blas.NoTrans and s == blas.Left, -// B = alpha * Aᵀ * B if tA == blas.Trans and s == blas.Left, -// B = alpha * Aᴴ * B if tA == blas.ConjTrans and s == blas.Left, -// B = alpha * B * A if tA == blas.NoTrans and s == blas.Right, -// B = alpha * B * Aᵀ if tA == blas.Trans and s == blas.Right, -// B = alpha * B * Aᴴ if tA == blas.ConjTrans and s == blas.Right, +// +// B = alpha * A * B if tA == blas.NoTrans and s == blas.Left, +// B = alpha * Aᵀ * B if tA == blas.Trans and s == blas.Left, +// B = alpha * Aᴴ * B if tA == blas.ConjTrans and s == blas.Left, +// B = alpha * B * A if tA == blas.NoTrans and s == blas.Right, +// B = alpha * B * Aᵀ if tA == blas.Trans and s == blas.Right, +// B = alpha * B * Aᴴ if tA == blas.ConjTrans and s == blas.Right, +// // where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is // a scalar. func Trmm(s blas.Side, tA blas.Transpose, alpha complex64, a Triangular, b General) { @@ -469,12 +529,14 @@ func Trmm(s blas.Side, tA blas.Transpose, alpha complex64, a Triangular, b Gener } // Trsm solves -// A * X = alpha * B if tA == blas.NoTrans and s == blas.Left, -// Aᵀ * X = alpha * B if tA == blas.Trans and s == blas.Left, -// Aᴴ * X = alpha * B if tA == blas.ConjTrans and s == blas.Left, -// X * A = alpha * B if tA == blas.NoTrans and s == blas.Right, -// X * Aᵀ = alpha * B if tA == blas.Trans and s == blas.Right, -// X * Aᴴ = alpha * B if tA == blas.ConjTrans and s == blas.Right, +// +// A * X = alpha * B if tA == blas.NoTrans and s == blas.Left, +// Aᵀ * X = alpha * B if tA == blas.Trans and s == blas.Left, +// Aᴴ * X = alpha * B if tA == blas.ConjTrans and s == blas.Left, +// X * A = alpha * B if tA == blas.NoTrans and s == blas.Right, +// X * Aᵀ = alpha * B if tA == blas.Trans and s == blas.Right, +// X * Aᴴ = alpha * B if tA == blas.ConjTrans and s == blas.Right, +// // where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and // alpha is a scalar. // @@ -487,8 +549,10 @@ func Trsm(s blas.Side, tA blas.Transpose, alpha complex64, a Triangular, b Gener } // Hemm performs -// C = alpha * A * B + beta * C if s == blas.Left, -// C = alpha * B * A + beta * C if s == blas.Right, +// +// C = alpha * A * B + beta * C if s == blas.Left, +// C = alpha * B * A + beta * C if s == blas.Right, +// // where A is an n×n or m×m Hermitian matrix, B and C are m×n matrices, and // alpha and beta are scalars. func Hemm(s blas.Side, alpha complex64, a Hermitian, b General, beta complex64, c General) { @@ -502,8 +566,10 @@ func Hemm(s blas.Side, alpha complex64, a Hermitian, b General, beta complex64, } // Herk performs the Hermitian rank-k update -// C = alpha * A * Aᴴ + beta*C if t == blas.NoTrans, -// C = alpha * Aᴴ * A + beta*C if t == blas.ConjTrans, +// +// C = alpha * A * Aᴴ + beta*C if t == blas.NoTrans, +// C = alpha * Aᴴ * A + beta*C if t == blas.ConjTrans, +// // where C is an n×n Hermitian matrix, A is an n×k matrix if t == blas.NoTrans // and a k×n matrix otherwise, and alpha and beta are scalars. 
func Herk(t blas.Transpose, alpha float32, a General, beta float32, c Hermitian) { @@ -517,8 +583,10 @@ func Herk(t blas.Transpose, alpha float32, a General, beta float32, c Hermitian) } // Her2k performs the Hermitian rank-2k update -// C = alpha * A * Bᴴ + conj(alpha) * B * Aᴴ + beta * C if t == blas.NoTrans, -// C = alpha * Aᴴ * B + conj(alpha) * Bᴴ * A + beta * C if t == blas.ConjTrans, +// +// C = alpha * A * Bᴴ + conj(alpha) * B * Aᴴ + beta * C if t == blas.NoTrans, +// C = alpha * Aᴴ * B + conj(alpha) * Bᴴ * A + beta * C if t == blas.ConjTrans, +// // where C is an n×n Hermitian matrix, A and B are n×k matrices if t == NoTrans // and k×n matrices otherwise, and alpha and beta are scalars. func Her2k(t blas.Transpose, alpha complex64, a, b General, beta float32, c Hermitian) { diff --git a/blas/gonum/dgemm.go b/blas/gonum/dgemm.go index 9ebf6b2a..9e74cc1d 100644 --- a/blas/gonum/dgemm.go +++ b/blas/gonum/dgemm.go @@ -13,10 +13,12 @@ import ( ) // Dgemm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C -// C = alpha * Aᵀ * B + beta * C -// C = alpha * A * Bᵀ + beta * C -// C = alpha * Aᵀ * Bᵀ + beta * C +// +// C = alpha * A * B + beta * C +// C = alpha * Aᵀ * B + beta * C +// C = alpha * A * Bᵀ + beta * C +// C = alpha * Aᵀ * Bᵀ + beta * C +// // where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is // an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or // B are transposed. diff --git a/blas/gonum/doc.go b/blas/gonum/doc.go index 3f4b6c1d..cbca601d 100644 --- a/blas/gonum/doc.go +++ b/blas/gonum/doc.go @@ -28,7 +28,9 @@ a single vector argument where the increment may only be positive. If the increm is negative, s[0] is the last element in the slice. Note that this is not the same as counting backward from the end of the slice, as len(s) may be longer than necessary. So, for example, if n = 5 and incX = 3, the elements of s are + [0 * * 1 * * 2 * * 3 * * 4 * * * ...] + where ∗ elements are never accessed. If incX = -3, the same elements are accessed, just in reverse order (4, 3, 2, 1, 0). @@ -36,7 +38,9 @@ Dense matrices are specified by a number of rows, a number of columns, and a str The stride specifies the number of entries in the slice between the first element of successive rows. The stride must be at least as large as the number of columns but may be longer. + [a00 ... a0n a0* ... a1stride-1 a21 ... amn am* ... amstride-1] + Thus, dense[i*ld + j] refers to the {i, j}th element of the matrix. Symmetric and triangular matrices (non-packed) are stored identically to Dense, @@ -45,41 +49,48 @@ except that only elements in one triangle of the matrix are accessed. Packed symmetric and packed triangular matrices are laid out with the entries condensed such that all of the unreferenced elements are removed. So, the upper triangular matrix - [ - 1 2 3 - 0 4 5 - 0 0 6 - ] + + [ + 1 2 3 + 0 4 5 + 0 0 6 + ] + and the lower-triangular matrix - [ - 1 0 0 - 2 3 0 - 4 5 6 - ] + + [ + 1 0 0 + 2 3 0 + 4 5 6 + ] + will both be compacted as [1 2 3 4 5 6]. The (i, j) element of the original dense matrix can be found at element i*n - (i-1)*i/2 + j for upper triangular, and at element i * (i+1) /2 + j for lower triangular. Banded matrices are laid out in a compact format, constructed by removing the zeros in the rows and aligning the diagonals. 
For example, the matrix - [ - 1 2 3 0 0 0 - 4 5 6 7 0 0 - 0 8 9 10 11 0 - 0 0 12 13 14 15 - 0 0 0 16 17 18 - 0 0 0 0 19 20 - ] + + [ + 1 2 3 0 0 0 + 4 5 6 7 0 0 + 0 8 9 10 11 0 + 0 0 12 13 14 15 + 0 0 0 16 17 18 + 0 0 0 0 19 20 + ] implicitly becomes (∗ entries are never accessed) - [ - * 1 2 3 - 4 5 6 7 - 8 9 10 11 - 12 13 14 15 - 16 17 18 * - 19 20 * * - ] + + [ + * 1 2 3 + 4 5 6 7 + 8 9 10 11 + 12 13 14 15 + 16 17 18 * + 19 20 * * + ] + which is given to the BLAS routine as [∗ 1 2 3 4 ...]. See http://www.crest.iu.edu/research/mtl/reference/html/banded.html diff --git a/blas/gonum/level1cmplx128.go b/blas/gonum/level1cmplx128.go index a207db4b..3e3af0db 100644 --- a/blas/gonum/level1cmplx128.go +++ b/blas/gonum/level1cmplx128.go @@ -14,7 +14,9 @@ import ( var _ blas.Complex128Level1 = Implementation{} // Dzasum returns the sum of the absolute values of the elements of x -// \sum_i |Re(x[i])| + |Im(x[i])| +// +// \sum_i |Re(x[i])| + |Im(x[i])| +// // Dzasum returns 0 if incX is negative. func (Implementation) Dzasum(n int, x []complex128, incX int) float64 { if n < 0 { @@ -47,7 +49,9 @@ func (Implementation) Dzasum(n int, x []complex128, incX int) float64 { } // Dznrm2 computes the Euclidean norm of the complex vector x, -// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). +// +// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). +// // This function returns 0 if incX is negative. func (Implementation) Dznrm2(n int, x []complex128, incX int) float64 { if incX < 1 { @@ -164,7 +168,8 @@ func (Implementation) Izamax(n int, x []complex128, incX int) int { } // Zaxpy adds alpha times x to y: -// y[i] += alpha * x[i] for all i +// +// y[i] += alpha * x[i] for all i func (Implementation) Zaxpy(n int, alpha complex128, x []complex128, incX int, y []complex128, incY int) { if incX == 0 { panic(zeroIncX) @@ -240,7 +245,9 @@ func (Implementation) Zcopy(n int, x []complex128, incX int, y []complex128, inc } // Zdotc computes the dot product -// xᴴ · y +// +// xᴴ · y +// // of two complex vectors x and y. func (Implementation) Zdotc(n int, x []complex128, incX int, y []complex128, incY int) complex128 { if incX == 0 { @@ -281,7 +288,9 @@ func (Implementation) Zdotc(n int, x []complex128, incX int, y []complex128, inc } // Zdotu computes the dot product -// xᵀ · y +// +// xᵀ · y +// // of two complex vectors x and y. func (Implementation) Zdotu(n int, x []complex128, incX int, y []complex128, incY int) complex128 { if incX == 0 { diff --git a/blas/gonum/level1cmplx64.go b/blas/gonum/level1cmplx64.go index 018bae21..249335ca 100644 --- a/blas/gonum/level1cmplx64.go +++ b/blas/gonum/level1cmplx64.go @@ -16,7 +16,9 @@ import ( var _ blas.Complex64Level1 = Implementation{} // Scasum returns the sum of the absolute values of the elements of x -// \sum_i |Re(x[i])| + |Im(x[i])| +// +// \sum_i |Re(x[i])| + |Im(x[i])| +// // Scasum returns 0 if incX is negative. // // Complex64 implementations are autogenerated and not directly tested. @@ -51,7 +53,9 @@ func (Implementation) Scasum(n int, x []complex64, incX int) float32 { } // Scnrm2 computes the Euclidean norm of the complex vector x, -// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). +// +// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). +// // This function returns 0 if incX is negative. // // Complex64 implementations are autogenerated and not directly tested. 
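To make the banded layout described above concrete, here is a minimal sketch that passes the 6×6 example matrix to Dgbmv in its compact form. It assumes the gonum.Implementation methods whose signatures appear later in this diff; the right-hand side is a vector of ones so the result is just the row sums.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas"
		"gonum.org/v1/gonum/blas/gonum"
	)

	func main() {
		var impl gonum.Implementation

		// The 6×6 band matrix from the example has kL = 1 sub-diagonal and
		// kU = 2 super-diagonals, so each compact row holds kL+kU+1 = 4 entries.
		// Positions marked * in the documentation are never read by Dgbmv and
		// are left as zero here.
		a := []float64{
			0, 1, 2, 3,
			4, 5, 6, 7,
			8, 9, 10, 11,
			12, 13, 14, 15,
			16, 17, 18, 0,
			19, 20, 0, 0,
		}
		x := []float64{1, 1, 1, 1, 1, 1}
		y := make([]float64, 6)

		// y = 1*A*x + 0*y, i.e. the row sums of the dense matrix.
		impl.Dgbmv(blas.NoTrans, 6, 6, 1, 2, 1, a, 4, x, 1, 0, y, 1)
		fmt.Println(y) // expected: [6 22 38 54 51 39]
	}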
@@ -172,7 +176,8 @@ func (Implementation) Icamax(n int, x []complex64, incX int) int { } // Caxpy adds alpha times x to y: -// y[i] += alpha * x[i] for all i +// +// y[i] += alpha * x[i] for all i // // Complex64 implementations are autogenerated and not directly tested. func (Implementation) Caxpy(n int, alpha complex64, x []complex64, incX int, y []complex64, incY int) { @@ -252,7 +257,9 @@ func (Implementation) Ccopy(n int, x []complex64, incX int, y []complex64, incY } // Cdotc computes the dot product -// xᴴ · y +// +// xᴴ · y +// // of two complex vectors x and y. // // Complex64 implementations are autogenerated and not directly tested. @@ -295,7 +302,9 @@ func (Implementation) Cdotc(n int, x []complex64, incX int, y []complex64, incY } // Cdotu computes the dot product -// xᵀ · y +// +// xᵀ · y +// // of two complex vectors x and y. // // Complex64 implementations are autogenerated and not directly tested. diff --git a/blas/gonum/level1float32.go b/blas/gonum/level1float32.go index 6ea6db8a..a90b88ac 100644 --- a/blas/gonum/level1float32.go +++ b/blas/gonum/level1float32.go @@ -16,7 +16,9 @@ import ( var _ blas.Float32Level1 = Implementation{} // Snrm2 computes the Euclidean norm of a vector, -// sqrt(\sum_i x[i] * x[i]). +// +// sqrt(\sum_i x[i] * x[i]). +// // This function returns 0 if incX is negative. // // Float32 implementations are autogenerated and not directly tested. @@ -46,7 +48,9 @@ func (Implementation) Snrm2(n int, x []float32, incX int) float32 { } // Sasum computes the sum of the absolute values of the elements of x. -// \sum_i |x[i]| +// +// \sum_i |x[i]| +// // Sasum returns 0 if incX is negative. // // Float32 implementations are autogenerated and not directly tested. @@ -127,7 +131,8 @@ func (Implementation) Isamax(n int, x []float32, incX int) int { } // Sswap exchanges the elements of two vectors. -// x[i], y[i] = y[i], x[i] for all i +// +// x[i], y[i] = y[i], x[i] for all i // // Float32 implementations are autogenerated and not directly tested. func (Implementation) Sswap(n int, x []float32, incX int, y []float32, incY int) { @@ -171,7 +176,8 @@ func (Implementation) Sswap(n int, x []float32, incX int, y []float32, incY int) } // Scopy copies the elements of x into the elements of y. -// y[i] = x[i] for all i +// +// y[i] = x[i] for all i // // Float32 implementations are autogenerated and not directly tested. func (Implementation) Scopy(n int, x []float32, incX int, y []float32, incY int) { @@ -212,7 +218,8 @@ func (Implementation) Scopy(n int, x []float32, incX int, y []float32, incY int) } // Saxpy adds alpha times x to y -// y[i] += alpha * x[i] for all i +// +// y[i] += alpha * x[i] for all i // // Float32 implementations are autogenerated and not directly tested. func (Implementation) Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) { @@ -252,26 +259,32 @@ func (Implementation) Saxpy(n int, alpha float32, x []float32, incX int, y []flo } // Srotg computes a plane rotation -// ⎡ c s ⎤ ⎡ a ⎤ = ⎡ r ⎤ -// ⎣ -s c ⎦ ⎣ b ⎦ ⎣ 0 ⎦ +// +// ⎡ c s ⎤ ⎡ a ⎤ = ⎡ r ⎤ +// ⎣ -s c ⎦ ⎣ b ⎦ ⎣ 0 ⎦ +// // satisfying c^2 + s^2 = 1. 
// // The computation uses the formulas -// sigma = sgn(a) if |a| > |b| -// = sgn(b) if |b| >= |a| -// r = sigma*sqrt(a^2 + b^2) -// c = 1; s = 0 if r = 0 -// c = a/r; s = b/r if r != 0 -// c >= 0 if |a| > |b| +// +// sigma = sgn(a) if |a| > |b| +// = sgn(b) if |b| >= |a| +// r = sigma*sqrt(a^2 + b^2) +// c = 1; s = 0 if r = 0 +// c = a/r; s = b/r if r != 0 +// c >= 0 if |a| > |b| // // The subroutine also computes -// z = s if |a| > |b|, -// = 1/c if |b| >= |a| and c != 0 -// = 1 if c = 0 +// +// z = s if |a| > |b|, +// = 1/c if |b| >= |a| and c != 0 +// = 1 if c = 0 +// // This allows c and s to be reconstructed from z as follows: -// If z = 1, set c = 0, s = 1. -// If |z| < 1, set c = sqrt(1 - z^2) and s = z. -// If |z| > 1, set c = 1/z and s = sqrt(1 - c^2). +// +// If z = 1, set c = 0, s = 1. +// If |z| < 1, set c = sqrt(1 - z^2) and s = z. +// If |z| > 1, set c = 1/z and s = sqrt(1 - c^2). // // NOTE: There is a discrepancy between the reference implementation and the // BLAS technical manual regarding the sign for r when a or b are zero. Drotg @@ -440,8 +453,9 @@ func (Implementation) Srotmg(d1, d2, x1, y1 float32) (p blas.SrotmParams, rd1, r } // Srot applies a plane transformation. -// x[i] = c * x[i] + s * y[i] -// y[i] = c * y[i] - s * x[i] +// +// x[i] = c * x[i] + s * y[i] +// y[i] = c * y[i] - s * x[i] // // Float32 implementations are autogenerated and not directly tested. func (Implementation) Srot(n int, x []float32, incX int, y []float32, incY int, c float32, s float32) { @@ -596,7 +610,9 @@ func (Implementation) Srotm(n int, x []float32, incX int, y []float32, incY int, } // Sscal scales x by alpha. -// x[i] *= alpha +// +// x[i] *= alpha +// // Sscal has no effect if incX < 0. // // Float32 implementations are autogenerated and not directly tested. diff --git a/blas/gonum/level1float32_dsdot.go b/blas/gonum/level1float32_dsdot.go index 089e0d8f..cd7df411 100644 --- a/blas/gonum/level1float32_dsdot.go +++ b/blas/gonum/level1float32_dsdot.go @@ -11,7 +11,8 @@ import ( ) // Dsdot computes the dot product of the two vectors -// \sum_i x[i]*y[i] +// +// \sum_i x[i]*y[i] // // Float32 implementations are autogenerated and not directly tested. func (Implementation) Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 { diff --git a/blas/gonum/level1float32_sdot.go b/blas/gonum/level1float32_sdot.go index 41c3e792..c4cc1663 100644 --- a/blas/gonum/level1float32_sdot.go +++ b/blas/gonum/level1float32_sdot.go @@ -11,7 +11,8 @@ import ( ) // Sdot computes the dot product of the two vectors -// \sum_i x[i]*y[i] +// +// \sum_i x[i]*y[i] // // Float32 implementations are autogenerated and not directly tested. func (Implementation) Sdot(n int, x []float32, incX int, y []float32, incY int) float32 { diff --git a/blas/gonum/level1float32_sdsdot.go b/blas/gonum/level1float32_sdsdot.go index 69dd8aa1..eb6b73bd 100644 --- a/blas/gonum/level1float32_sdsdot.go +++ b/blas/gonum/level1float32_sdsdot.go @@ -11,7 +11,8 @@ import ( ) // Sdsdot computes the dot product of the two vectors plus a constant -// alpha + \sum_i x[i]*y[i] +// +// alpha + \sum_i x[i]*y[i] // // Float32 implementations are autogenerated and not directly tested. 
func (Implementation) Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 { diff --git a/blas/gonum/level1float64.go b/blas/gonum/level1float64.go index e7efe967..795769d9 100644 --- a/blas/gonum/level1float64.go +++ b/blas/gonum/level1float64.go @@ -14,7 +14,9 @@ import ( var _ blas.Float64Level1 = Implementation{} // Dnrm2 computes the Euclidean norm of a vector, -// sqrt(\sum_i x[i] * x[i]). +// +// sqrt(\sum_i x[i] * x[i]). +// // This function returns 0 if incX is negative. func (Implementation) Dnrm2(n int, x []float64, incX int) float64 { if incX < 1 { @@ -42,7 +44,9 @@ func (Implementation) Dnrm2(n int, x []float64, incX int) float64 { } // Dasum computes the sum of the absolute values of the elements of x. -// \sum_i |x[i]| +// +// \sum_i |x[i]| +// // Dasum returns 0 if incX is negative. func (Implementation) Dasum(n int, x []float64, incX int) float64 { var sum float64 @@ -119,7 +123,8 @@ func (Implementation) Idamax(n int, x []float64, incX int) int { } // Dswap exchanges the elements of two vectors. -// x[i], y[i] = y[i], x[i] for all i +// +// x[i], y[i] = y[i], x[i] for all i func (Implementation) Dswap(n int, x []float64, incX int, y []float64, incY int) { if incX == 0 { panic(zeroIncX) @@ -161,7 +166,8 @@ func (Implementation) Dswap(n int, x []float64, incX int, y []float64, incY int) } // Dcopy copies the elements of x into the elements of y. -// y[i] = x[i] for all i +// +// y[i] = x[i] for all i func (Implementation) Dcopy(n int, x []float64, incX int, y []float64, incY int) { if incX == 0 { panic(zeroIncX) @@ -200,7 +206,8 @@ func (Implementation) Dcopy(n int, x []float64, incX int, y []float64, incY int) } // Daxpy adds alpha times x to y -// y[i] += alpha * x[i] for all i +// +// y[i] += alpha * x[i] for all i func (Implementation) Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) { if incX == 0 { panic(zeroIncX) @@ -238,26 +245,32 @@ func (Implementation) Daxpy(n int, alpha float64, x []float64, incX int, y []flo } // Drotg computes a plane rotation -// ⎡ c s ⎤ ⎡ a ⎤ = ⎡ r ⎤ -// ⎣ -s c ⎦ ⎣ b ⎦ ⎣ 0 ⎦ +// +// ⎡ c s ⎤ ⎡ a ⎤ = ⎡ r ⎤ +// ⎣ -s c ⎦ ⎣ b ⎦ ⎣ 0 ⎦ +// // satisfying c^2 + s^2 = 1. // // The computation uses the formulas -// sigma = sgn(a) if |a| > |b| -// = sgn(b) if |b| >= |a| -// r = sigma*sqrt(a^2 + b^2) -// c = 1; s = 0 if r = 0 -// c = a/r; s = b/r if r != 0 -// c >= 0 if |a| > |b| +// +// sigma = sgn(a) if |a| > |b| +// = sgn(b) if |b| >= |a| +// r = sigma*sqrt(a^2 + b^2) +// c = 1; s = 0 if r = 0 +// c = a/r; s = b/r if r != 0 +// c >= 0 if |a| > |b| // // The subroutine also computes -// z = s if |a| > |b|, -// = 1/c if |b| >= |a| and c != 0 -// = 1 if c = 0 +// +// z = s if |a| > |b|, +// = 1/c if |b| >= |a| and c != 0 +// = 1 if c = 0 +// // This allows c and s to be reconstructed from z as follows: -// If z = 1, set c = 0, s = 1. -// If |z| < 1, set c = sqrt(1 - z^2) and s = z. -// If |z| > 1, set c = 1/z and s = sqrt(1 - c^2). +// +// If z = 1, set c = 0, s = 1. +// If |z| < 1, set c = sqrt(1 - z^2) and s = z. +// If |z| > 1, set c = 1/z and s = sqrt(1 - c^2). // // NOTE: There is a discrepancy between the reference implementation and the // BLAS technical manual regarding the sign for r when a or b are zero. Drotg @@ -422,8 +435,9 @@ func (Implementation) Drotmg(d1, d2, x1, y1 float64) (p blas.DrotmParams, rd1, r } // Drot applies a plane transformation. 
-// x[i] = c * x[i] + s * y[i] -// y[i] = c * y[i] - s * x[i] +// +// x[i] = c * x[i] + s * y[i] +// y[i] = c * y[i] - s * x[i] func (Implementation) Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) { if incX == 0 { panic(zeroIncX) @@ -574,7 +588,9 @@ func (Implementation) Drotm(n int, x []float64, incX int, y []float64, incY int, } // Dscal scales x by alpha. -// x[i] *= alpha +// +// x[i] *= alpha +// // Dscal has no effect if incX < 0. func (Implementation) Dscal(n int, alpha float64, x []float64, incX int) { if incX < 1 { diff --git a/blas/gonum/level1float64_ddot.go b/blas/gonum/level1float64_ddot.go index be87ba13..1569656e 100644 --- a/blas/gonum/level1float64_ddot.go +++ b/blas/gonum/level1float64_ddot.go @@ -9,7 +9,8 @@ import ( ) // Ddot computes the dot product of the two vectors -// \sum_i x[i]*y[i] +// +// \sum_i x[i]*y[i] func (Implementation) Ddot(n int, x []float64, incX int, y []float64, incY int) float64 { if incX == 0 { panic(zeroIncX) diff --git a/blas/gonum/level2cmplx128.go b/blas/gonum/level2cmplx128.go index d0ca4eb9..fa076d5f 100644 --- a/blas/gonum/level2cmplx128.go +++ b/blas/gonum/level2cmplx128.go @@ -14,9 +14,11 @@ import ( var _ blas.Complex128Level2 = Implementation{} // Zgbmv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if trans = blas.NoTrans -// y = alpha * Aᵀ * x + beta * y if trans = blas.Trans -// y = alpha * Aᴴ * x + beta * y if trans = blas.ConjTrans +// +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * Aᵀ * x + beta * y if trans = blas.Trans +// y = alpha * Aᴴ * x + beta * y if trans = blas.ConjTrans +// // where alpha and beta are scalars, x and y are vectors, and A is an m×n band matrix // with kL sub-diagonals and kU super-diagonals. func (Implementation) Zgbmv(trans blas.Transpose, m, n, kL, kU int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { @@ -209,9 +211,11 @@ func (Implementation) Zgbmv(trans blas.Transpose, m, n, kL, kU int, alpha comple } // Zgemv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if trans = blas.NoTrans -// y = alpha * Aᵀ * x + beta * y if trans = blas.Trans -// y = alpha * Aᴴ * x + beta * y if trans = blas.ConjTrans +// +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * Aᵀ * x + beta * y if trans = blas.Trans +// y = alpha * Aᴴ * x + beta * y if trans = blas.ConjTrans +// // where alpha and beta are scalars, x and y are vectors, and A is an m×n dense matrix. func (Implementation) Zgemv(trans blas.Transpose, m, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { switch trans { @@ -364,7 +368,9 @@ func (Implementation) Zgemv(trans blas.Transpose, m, n int, alpha complex128, a } // Zgerc performs the rank-one operation -// A += alpha * x * yᴴ +// +// A += alpha * x * yᴴ +// // where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, // and y is an n element vector. func (Implementation) Zgerc(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { @@ -422,7 +428,9 @@ func (Implementation) Zgerc(m, n int, alpha complex128, x []complex128, incX int } // Zgeru performs the rank-one operation -// A += alpha * x * yᵀ +// +// A += alpha * x * yᵀ +// // where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, // and y is an n element vector. 
func (Implementation) Zgeru(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { @@ -491,7 +499,9 @@ func (Implementation) Zgeru(m, n int, alpha complex128, x []complex128, incX int } // Zhbmv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where alpha and beta are scalars, x and y are vectors, and A is an n×n // Hermitian band matrix with k super-diagonals. The imaginary parts of // the diagonal elements of A are ignored and assumed to be zero. @@ -662,7 +672,9 @@ func (Implementation) Zhbmv(uplo blas.Uplo, n, k int, alpha complex128, a []comp } // Zhemv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where alpha and beta are scalars, x and y are vectors, and A is an n×n // Hermitian matrix. The imaginary parts of the diagonal elements of A are // ignored and assumed to be zero. @@ -822,7 +834,9 @@ func (Implementation) Zhemv(uplo blas.Uplo, n int, alpha complex128, a []complex } // Zher performs the Hermitian rank-one operation -// A += alpha * x * xᴴ +// +// A += alpha * x * xᴴ +// // where A is an n×n Hermitian matrix, alpha is a real scalar, and x is an n // element vector. On entry, the imaginary parts of the diagonal elements of A // are ignored and assumed to be zero, on return they will be set to zero. @@ -944,7 +958,9 @@ func (Implementation) Zher(uplo blas.Uplo, n int, alpha float64, x []complex128, } // Zher2 performs the Hermitian rank-two operation -// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ +// +// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ +// // where alpha is a scalar, x and y are n element vectors and A is an n×n // Hermitian matrix. On entry, the imaginary parts of the diagonal elements are // ignored and assumed to be zero. On return they will be set to zero. @@ -1081,7 +1097,9 @@ func (Implementation) Zher2(uplo blas.Uplo, n int, alpha complex128, x []complex } // Zhpmv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where alpha and beta are scalars, x and y are vectors, and A is an n×n // Hermitian matrix in packed form. The imaginary parts of the diagonal // elements of A are ignored and assumed to be zero. @@ -1248,7 +1266,9 @@ func (Implementation) Zhpmv(uplo blas.Uplo, n int, alpha complex128, ap []comple } // Zhpr performs the Hermitian rank-1 operation -// A += alpha * x * xᴴ +// +// A += alpha * x * xᴴ +// // where alpha is a real scalar, x is a vector, and A is an n×n hermitian matrix // in packed form. On entry, the imaginary parts of the diagonal elements are // assumed to be zero, and on return they are set to zero. @@ -1382,7 +1402,9 @@ func (Implementation) Zhpr(uplo blas.Uplo, n int, alpha float64, x []complex128, } // Zhpr2 performs the Hermitian rank-2 operation -// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ +// +// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ +// // where alpha is a complex scalar, x and y are n element vectors, and A is an // n×n Hermitian matrix, supplied in packed form. On entry, the imaginary parts // of the diagonal elements are assumed to be zero, and on return they are set to zero. 
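A minimal sketch of the Hermitian rank-one update documented above, assuming the Zher signature shown in this hunk; the vector is invented for illustration and only the upper triangle of A is written.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas"
		"gonum.org/v1/gonum/blas/gonum"
	)

	func main() {
		var impl gonum.Implementation

		x := []complex128{1 + 1i, 2}
		a := make([]complex128, 4) // 2×2 Hermitian A, row-major, lda = 2

		// A += 1 * x * xᴴ, touching only the upper triangle.
		impl.Zher(blas.Upper, 2, 1, x, 1, a, 2)
		fmt.Println(a)
		// expected: [(2+0i) (2+2i) (0+0i) (4+0i)]
		// The strictly lower element a[2] is untouched and the imaginary parts
		// of the diagonal are zero, as documented.
	}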
@@ -1529,9 +1551,11 @@ func (Implementation) Zhpr2(uplo blas.Uplo, n int, alpha complex128, x []complex } // Ztbmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = Aᵀ * x if trans = blas.Trans -// x = Aᴴ * x if trans = blas.ConjTrans +// +// x = A * x if trans = blas.NoTrans +// x = Aᵀ * x if trans = blas.Trans +// x = Aᴴ * x if trans = blas.ConjTrans +// // where x is an n element vector and A is an n×n triangular band matrix, with // (k+1) diagonals. func (Implementation) Ztbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex128, lda int, x []complex128, incX int) { @@ -1765,9 +1789,11 @@ func (Implementation) Ztbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ztbsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// Aᵀ * x = b if trans == blas.Trans -// Aᴴ * x = b if trans == blas.ConjTrans +// +// A * x = b if trans == blas.NoTrans +// Aᵀ * x = b if trans == blas.Trans +// Aᴴ * x = b if trans == blas.ConjTrans +// // where b and x are n element vectors and A is an n×n triangular band matrix // with (k+1) diagonals. // @@ -2007,9 +2033,11 @@ func (Implementation) Ztbsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ztpmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = Aᵀ * x if trans = blas.Trans -// x = Aᴴ * x if trans = blas.ConjTrans +// +// x = A * x if trans = blas.NoTrans +// x = Aᵀ * x if trans = blas.Trans +// x = Aᴴ * x if trans = blas.ConjTrans +// // where x is an n element vector and A is an n×n triangular matrix, supplied in // packed form. func (Implementation) Ztpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex128, x []complex128, incX int) { @@ -2245,9 +2273,11 @@ func (Implementation) Ztpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ztpsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// Aᵀ * x = b if trans == blas.Trans -// Aᴴ * x = b if trans == blas.ConjTrans +// +// A * x = b if trans == blas.NoTrans +// Aᵀ * x = b if trans == blas.Trans +// Aᴴ * x = b if trans == blas.ConjTrans +// // where b and x are n element vectors and A is an n×n triangular matrix in // packed form. // @@ -2481,9 +2511,11 @@ func (Implementation) Ztpsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ztrmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = Aᵀ * x if trans = blas.Trans -// x = Aᴴ * x if trans = blas.ConjTrans +// +// x = A * x if trans = blas.NoTrans +// x = Aᵀ * x if trans = blas.Trans +// x = Aᴴ * x if trans = blas.ConjTrans +// // where x is a vector, and A is an n×n triangular matrix. func (Implementation) Ztrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex128, lda int, x []complex128, incX int) { switch trans { @@ -2689,9 +2721,11 @@ func (Implementation) Ztrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ztrsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// Aᵀ * x = b if trans == blas.Trans -// Aᴴ * x = b if trans == blas.ConjTrans +// +// A * x = b if trans == blas.NoTrans +// Aᵀ * x = b if trans == blas.Trans +// Aᴴ * x = b if trans == blas.ConjTrans +// // where b and x are n element vectors and A is an n×n triangular matrix. 
// // On entry, x contains the values of b, and the solution is diff --git a/blas/gonum/level2cmplx64.go b/blas/gonum/level2cmplx64.go index 3aa4c21c..3ce67868 100644 --- a/blas/gonum/level2cmplx64.go +++ b/blas/gonum/level2cmplx64.go @@ -16,9 +16,11 @@ import ( var _ blas.Complex64Level2 = Implementation{} // Cgbmv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if trans = blas.NoTrans -// y = alpha * Aᵀ * x + beta * y if trans = blas.Trans -// y = alpha * Aᴴ * x + beta * y if trans = blas.ConjTrans +// +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * Aᵀ * x + beta * y if trans = blas.Trans +// y = alpha * Aᴴ * x + beta * y if trans = blas.ConjTrans +// // where alpha and beta are scalars, x and y are vectors, and A is an m×n band matrix // with kL sub-diagonals and kU super-diagonals. // @@ -213,9 +215,11 @@ func (Implementation) Cgbmv(trans blas.Transpose, m, n, kL, kU int, alpha comple } // Cgemv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if trans = blas.NoTrans -// y = alpha * Aᵀ * x + beta * y if trans = blas.Trans -// y = alpha * Aᴴ * x + beta * y if trans = blas.ConjTrans +// +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * Aᵀ * x + beta * y if trans = blas.Trans +// y = alpha * Aᴴ * x + beta * y if trans = blas.ConjTrans +// // where alpha and beta are scalars, x and y are vectors, and A is an m×n dense matrix. // // Complex64 implementations are autogenerated and not directly tested. @@ -370,7 +374,9 @@ func (Implementation) Cgemv(trans blas.Transpose, m, n int, alpha complex64, a [ } // Cgerc performs the rank-one operation -// A += alpha * x * yᴴ +// +// A += alpha * x * yᴴ +// // where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, // and y is an n element vector. // @@ -430,7 +436,9 @@ func (Implementation) Cgerc(m, n int, alpha complex64, x []complex64, incX int, } // Cgeru performs the rank-one operation -// A += alpha * x * yᵀ +// +// A += alpha * x * yᵀ +// // where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, // and y is an n element vector. // @@ -501,7 +509,9 @@ func (Implementation) Cgeru(m, n int, alpha complex64, x []complex64, incX int, } // Chbmv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where alpha and beta are scalars, x and y are vectors, and A is an n×n // Hermitian band matrix with k super-diagonals. The imaginary parts of // the diagonal elements of A are ignored and assumed to be zero. @@ -674,7 +684,9 @@ func (Implementation) Chbmv(uplo blas.Uplo, n, k int, alpha complex64, a []compl } // Chemv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where alpha and beta are scalars, x and y are vectors, and A is an n×n // Hermitian matrix. The imaginary parts of the diagonal elements of A are // ignored and assumed to be zero. @@ -836,7 +848,9 @@ func (Implementation) Chemv(uplo blas.Uplo, n int, alpha complex64, a []complex6 } // Cher performs the Hermitian rank-one operation -// A += alpha * x * xᴴ +// +// A += alpha * x * xᴴ +// // where A is an n×n Hermitian matrix, alpha is a real scalar, and x is an n // element vector. On entry, the imaginary parts of the diagonal elements of A // are ignored and assumed to be zero, on return they will be set to zero. 
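The difference between the conjugated and unconjugated rank-one updates is easiest to see side by side. A minimal sketch, assuming the Cgerc and Cgeru signatures from this file and using invented values:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas/gonum"
	)

	func main() {
		var impl gonum.Implementation

		x := []complex64{1i, 1}
		y := []complex64{1, 1i}

		// Cgeru applies x * yᵀ, Cgerc applies x * yᴴ; only y's conjugation differs.
		au := make([]complex64, 4) // 2×2, lda = 2
		impl.Cgeru(2, 2, 1, x, 1, y, 1, au, 2)

		ac := make([]complex64, 4)
		impl.Cgerc(2, 2, 1, x, 1, y, 1, ac, 2)

		fmt.Println(au) // expected: [(0+1i) (-1+0i) (1+0i) (0+1i)]
		fmt.Println(ac) // expected: [(0+1i) (1+0i) (1+0i) (0-1i)]
	}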
@@ -960,7 +974,9 @@ func (Implementation) Cher(uplo blas.Uplo, n int, alpha float32, x []complex64, } // Cher2 performs the Hermitian rank-two operation -// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ +// +// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ +// // where alpha is a scalar, x and y are n element vectors and A is an n×n // Hermitian matrix. On entry, the imaginary parts of the diagonal elements are // ignored and assumed to be zero. On return they will be set to zero. @@ -1099,7 +1115,9 @@ func (Implementation) Cher2(uplo blas.Uplo, n int, alpha complex64, x []complex6 } // Chpmv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where alpha and beta are scalars, x and y are vectors, and A is an n×n // Hermitian matrix in packed form. The imaginary parts of the diagonal // elements of A are ignored and assumed to be zero. @@ -1268,7 +1286,9 @@ func (Implementation) Chpmv(uplo blas.Uplo, n int, alpha complex64, ap []complex } // Chpr performs the Hermitian rank-1 operation -// A += alpha * x * xᴴ +// +// A += alpha * x * xᴴ +// // where alpha is a real scalar, x is a vector, and A is an n×n hermitian matrix // in packed form. On entry, the imaginary parts of the diagonal elements are // assumed to be zero, and on return they are set to zero. @@ -1404,7 +1424,9 @@ func (Implementation) Chpr(uplo blas.Uplo, n int, alpha float32, x []complex64, } // Chpr2 performs the Hermitian rank-2 operation -// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ +// +// A += alpha * x * yᴴ + conj(alpha) * y * xᴴ +// // where alpha is a complex scalar, x and y are n element vectors, and A is an // n×n Hermitian matrix, supplied in packed form. On entry, the imaginary parts // of the diagonal elements are assumed to be zero, and on return they are set to zero. @@ -1553,9 +1575,11 @@ func (Implementation) Chpr2(uplo blas.Uplo, n int, alpha complex64, x []complex6 } // Ctbmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = Aᵀ * x if trans = blas.Trans -// x = Aᴴ * x if trans = blas.ConjTrans +// +// x = A * x if trans = blas.NoTrans +// x = Aᵀ * x if trans = blas.Trans +// x = Aᴴ * x if trans = blas.ConjTrans +// // where x is an n element vector and A is an n×n triangular band matrix, with // (k+1) diagonals. // @@ -1791,9 +1815,11 @@ func (Implementation) Ctbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ctbsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// Aᵀ * x = b if trans == blas.Trans -// Aᴴ * x = b if trans == blas.ConjTrans +// +// A * x = b if trans == blas.NoTrans +// Aᵀ * x = b if trans == blas.Trans +// Aᴴ * x = b if trans == blas.ConjTrans +// // where b and x are n element vectors and A is an n×n triangular band matrix // with (k+1) diagonals. // @@ -2035,9 +2061,11 @@ func (Implementation) Ctbsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ctpmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = Aᵀ * x if trans = blas.Trans -// x = Aᴴ * x if trans = blas.ConjTrans +// +// x = A * x if trans = blas.NoTrans +// x = Aᵀ * x if trans = blas.Trans +// x = Aᴴ * x if trans = blas.ConjTrans +// // where x is an n element vector and A is an n×n triangular matrix, supplied in // packed form. 
// @@ -2275,9 +2303,11 @@ func (Implementation) Ctpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ctpsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// Aᵀ * x = b if trans == blas.Trans -// Aᴴ * x = b if trans == blas.ConjTrans +// +// A * x = b if trans == blas.NoTrans +// Aᵀ * x = b if trans == blas.Trans +// Aᴴ * x = b if trans == blas.ConjTrans +// // where b and x are n element vectors and A is an n×n triangular matrix in // packed form. // @@ -2513,9 +2543,11 @@ func (Implementation) Ctpsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ctrmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = Aᵀ * x if trans = blas.Trans -// x = Aᴴ * x if trans = blas.ConjTrans +// +// x = A * x if trans = blas.NoTrans +// x = Aᵀ * x if trans = blas.Trans +// x = Aᴴ * x if trans = blas.ConjTrans +// // where x is a vector, and A is an n×n triangular matrix. // // Complex64 implementations are autogenerated and not directly tested. @@ -2723,9 +2755,11 @@ func (Implementation) Ctrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag } // Ctrsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// Aᵀ * x = b if trans == blas.Trans -// Aᴴ * x = b if trans == blas.ConjTrans +// +// A * x = b if trans == blas.NoTrans +// Aᵀ * x = b if trans == blas.Trans +// Aᴴ * x = b if trans == blas.ConjTrans +// // where b and x are n element vectors and A is an n×n triangular matrix. // // On entry, x contains the values of b, and the solution is diff --git a/blas/gonum/level2float32.go b/blas/gonum/level2float32.go index a05b25e4..26e4959d 100644 --- a/blas/gonum/level2float32.go +++ b/blas/gonum/level2float32.go @@ -14,7 +14,9 @@ import ( var _ blas.Float32Level2 = Implementation{} // Sger performs the rank-one operation -// A += alpha * x * yᵀ +// +// A += alpha * x * yᵀ +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. // // Float32 implementations are autogenerated and not directly tested. @@ -63,8 +65,10 @@ func (Implementation) Sger(m, n int, alpha float32, x []float32, incX int, y []f } // Sgbmv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if tA == blas.NoTrans -// y = alpha * Aᵀ * x + beta * y if tA == blas.Trans or blas.ConjTrans +// +// y = alpha * A * x + beta * y if tA == blas.NoTrans +// y = alpha * Aᵀ * x + beta * y if tA == blas.Trans or blas.ConjTrans +// // where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, // x and y are vectors, and alpha and beta are scalars. // @@ -230,8 +234,10 @@ func (Implementation) Sgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float32, } // Sgemv computes -// y = alpha * A * x + beta * y if tA = blas.NoTrans -// y = alpha * Aᵀ * x + beta * y if tA = blas.Trans or blas.ConjTrans +// +// y = alpha * A * x + beta * y if tA = blas.NoTrans +// y = alpha * Aᵀ * x + beta * y if tA = blas.Trans or blas.ConjTrans +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. // // Float32 implementations are autogenerated and not directly tested. 
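A minimal sketch of the dense matrix-vector product documented above, assuming the Sgemv signature from this file; the matrix and vectors are invented for the example.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas"
		"gonum.org/v1/gonum/blas/gonum"
	)

	func main() {
		var impl gonum.Implementation

		// A is 2×3, stored row-major with lda = 3.
		a := []float32{
			1, 2, 3,
			4, 5, 6,
		}
		x := []float32{1, 1, 1}
		y := make([]float32, 2)

		// y = 1*A*x + 0*y
		impl.Sgemv(blas.NoTrans, 2, 3, 1, a, 3, x, 1, 0, y, 1)
		fmt.Println(y) // expected: [6 15]
	}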
@@ -302,8 +308,10 @@ func (Implementation) Sgemv(tA blas.Transpose, m, n int, alpha float32, a []floa } // Strmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// +// x = A * x if tA == blas.NoTrans +// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular matrix, and x is a vector. // // Float32 implementations are autogenerated and not directly tested. @@ -456,8 +464,10 @@ func (Implementation) Strmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, } // Strsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// Aᵀ * x = b if tA == blas.Trans or blas.ConjTrans +// +// A * x = b if tA == blas.NoTrans +// Aᵀ * x = b if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular matrix, and x and b are vectors. // // At entry to the function, x contains the values of b, and the result is @@ -639,7 +649,9 @@ func (Implementation) Strsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, } // Ssymv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where A is an n×n symmetric matrix, x and y are vectors, and alpha and // beta are scalars. // @@ -809,8 +821,10 @@ func (Implementation) Ssymv(ul blas.Uplo, n int, alpha float32, a []float32, lda } // Stbmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// +// x = A * x if tA == blas.NoTrans +// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. // // Float32 implementations are autogenerated and not directly tested. @@ -1020,8 +1034,10 @@ func (Implementation) Stbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k i } // Stpmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// +// x = A * x if tA == blas.NoTrans +// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular matrix in packed format, and x is a vector. // // Float32 implementations are autogenerated and not directly tested. @@ -1201,8 +1217,10 @@ func (Implementation) Stpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, } // Stbsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// Aᵀ * x = b if tA == blas.Trans or tA == blas.ConjTrans +// +// A * x = b if tA == blas.NoTrans +// Aᵀ * x = b if tA == blas.Trans or tA == blas.ConjTrans +// // where A is an n×n triangular band matrix with k+1 diagonals, // and x and b are vectors. // @@ -1425,7 +1443,9 @@ func (Implementation) Stbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k i } // Ssbmv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where A is an n×n symmetric band matrix with k super-diagonals, x and y are // vectors, and alpha and beta are scalars. // @@ -1597,7 +1617,9 @@ func (Implementation) Ssbmv(ul blas.Uplo, n, k int, alpha float32, a []float32, } // Ssyr performs the symmetric rank-one update -// A += alpha * x * xᵀ +// +// A += alpha * x * xᵀ +// // where A is an n×n symmetric matrix, and x is a vector. // // Float32 implementations are autogenerated and not directly tested. 
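The in-place convention of the triangular solvers (x holds b on entry and the solution on return) is shown in this small sketch, assuming the Strsv signature from this hunk and invented values:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas"
		"gonum.org/v1/gonum/blas/gonum"
	)

	func main() {
		var impl gonum.Implementation

		// Upper triangular 3×3 A; the strictly lower part is never referenced.
		a := []float32{
			2, 1, 1,
			0, 3, 2,
			0, 0, 4,
		}
		// On entry x holds b; on return it holds the solution of A * x = b.
		x := []float32{9, 13, 8}

		impl.Strsv(blas.Upper, blas.NoTrans, blas.NonUnit, 3, a, 3, x, 1)
		fmt.Println(x) // expected: [2 3 2]
	}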
@@ -1697,7 +1719,9 @@ func (Implementation) Ssyr(ul blas.Uplo, n int, alpha float32, x []float32, incX } // Ssyr2 performs the symmetric rank-two update -// A += alpha * x * yᵀ + alpha * y * xᵀ +// +// A += alpha * x * yᵀ + alpha * y * xᵀ +// // where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. // // Float32 implementations are autogenerated and not directly tested. @@ -1806,8 +1830,10 @@ func (Implementation) Ssyr2(ul blas.Uplo, n int, alpha float32, x []float32, inc } // Stpsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// Aᵀ * x = b if tA == blas.Trans or blas.ConjTrans +// +// A * x = b if tA == blas.NoTrans +// Aᵀ * x = b if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular matrix in packed format, and x and b are vectors. // // At entry to the function, x contains the values of b, and the result is @@ -1992,7 +2018,9 @@ func (Implementation) Stpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, } // Sspmv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where A is an n×n symmetric matrix in packed format, x and y are vectors, // and alpha and beta are scalars. // @@ -2161,7 +2189,9 @@ func (Implementation) Sspmv(ul blas.Uplo, n int, alpha float32, ap []float32, x } // Sspr performs the symmetric rank-one operation -// A += alpha * x * xᵀ +// +// A += alpha * x * xᵀ +// // where A is an n×n symmetric matrix in packed format, x is a vector, and // alpha is a scalar. // @@ -2255,7 +2285,9 @@ func (Implementation) Sspr(ul blas.Uplo, n int, alpha float32, x []float32, incX } // Sspr2 performs the symmetric rank-2 update -// A += alpha * x * yᵀ + alpha * y * xᵀ +// +// A += alpha * x * yᵀ + alpha * y * xᵀ +// // where A is an n×n symmetric matrix in packed format, x and y are vectors, // and alpha is a scalar. // diff --git a/blas/gonum/level2float64.go b/blas/gonum/level2float64.go index 3f3dd368..19b9c7e1 100644 --- a/blas/gonum/level2float64.go +++ b/blas/gonum/level2float64.go @@ -12,7 +12,9 @@ import ( var _ blas.Float64Level2 = Implementation{} // Dger performs the rank-one operation -// A += alpha * x * yᵀ +// +// A += alpha * x * yᵀ +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func (Implementation) Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { if m < 0 { @@ -59,8 +61,10 @@ func (Implementation) Dger(m, n int, alpha float64, x []float64, incX int, y []f } // Dgbmv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if tA == blas.NoTrans -// y = alpha * Aᵀ * x + beta * y if tA == blas.Trans or blas.ConjTrans +// +// y = alpha * A * x + beta * y if tA == blas.NoTrans +// y = alpha * Aᵀ * x + beta * y if tA == blas.Trans or blas.ConjTrans +// // where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, // x and y are vectors, and alpha and beta are scalars. 
func (Implementation) Dgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { @@ -224,8 +228,10 @@ func (Implementation) Dgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float64, } // Dgemv computes -// y = alpha * A * x + beta * y if tA = blas.NoTrans -// y = alpha * Aᵀ * x + beta * y if tA = blas.Trans or blas.ConjTrans +// +// y = alpha * A * x + beta * y if tA = blas.NoTrans +// y = alpha * Aᵀ * x + beta * y if tA = blas.Trans or blas.ConjTrans +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. func (Implementation) Dgemv(tA blas.Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { @@ -294,8 +300,10 @@ func (Implementation) Dgemv(tA blas.Transpose, m, n int, alpha float64, a []floa } // Dtrmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// +// x = A * x if tA == blas.NoTrans +// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular matrix, and x is a vector. func (Implementation) Dtrmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { if ul != blas.Lower && ul != blas.Upper { @@ -446,8 +454,10 @@ func (Implementation) Dtrmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, } // Dtrsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// Aᵀ * x = b if tA == blas.Trans or blas.ConjTrans +// +// A * x = b if tA == blas.NoTrans +// Aᵀ * x = b if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular matrix, and x and b are vectors. // // At entry to the function, x contains the values of b, and the result is @@ -627,7 +637,9 @@ func (Implementation) Dtrsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, } // Dsymv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where A is an n×n symmetric matrix, x and y are vectors, and alpha and // beta are scalars. func (Implementation) Dsymv(ul blas.Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { @@ -795,8 +807,10 @@ func (Implementation) Dsymv(ul blas.Uplo, n int, alpha float64, a []float64, lda } // Dtbmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// +// x = A * x if tA == blas.NoTrans +// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. func (Implementation) Dtbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { if ul != blas.Lower && ul != blas.Upper { @@ -1004,8 +1018,10 @@ func (Implementation) Dtbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k i } // Dtpmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// +// x = A * x if tA == blas.NoTrans +// x = Aᵀ * x if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular matrix in packed format, and x is a vector. 
func (Implementation) Dtpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { if ul != blas.Lower && ul != blas.Upper { @@ -1183,8 +1199,10 @@ func (Implementation) Dtpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, } // Dtbsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// Aᵀ * x = b if tA == blas.Trans or tA == blas.ConjTrans +// +// A * x = b if tA == blas.NoTrans +// Aᵀ * x = b if tA == blas.Trans or tA == blas.ConjTrans +// // where A is an n×n triangular band matrix with k+1 diagonals, // and x and b are vectors. // @@ -1405,7 +1423,9 @@ func (Implementation) Dtbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k i } // Dsbmv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where A is an n×n symmetric band matrix with k super-diagonals, x and y are // vectors, and alpha and beta are scalars. func (Implementation) Dsbmv(ul blas.Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { @@ -1575,7 +1595,9 @@ func (Implementation) Dsbmv(ul blas.Uplo, n, k int, alpha float64, a []float64, } // Dsyr performs the symmetric rank-one update -// A += alpha * x * xᵀ +// +// A += alpha * x * xᵀ +// // where A is an n×n symmetric matrix, and x is a vector. func (Implementation) Dsyr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) { if ul != blas.Lower && ul != blas.Upper { @@ -1673,7 +1695,9 @@ func (Implementation) Dsyr(ul blas.Uplo, n int, alpha float64, x []float64, incX } // Dsyr2 performs the symmetric rank-two update -// A += alpha * x * yᵀ + alpha * y * xᵀ +// +// A += alpha * x * yᵀ + alpha * y * xᵀ +// // where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. func (Implementation) Dsyr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { if ul != blas.Lower && ul != blas.Upper { @@ -1780,8 +1804,10 @@ func (Implementation) Dsyr2(ul blas.Uplo, n int, alpha float64, x []float64, inc } // Dtpsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// Aᵀ * x = b if tA == blas.Trans or blas.ConjTrans +// +// A * x = b if tA == blas.NoTrans +// Aᵀ * x = b if tA == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular matrix in packed format, and x and b are vectors. // // At entry to the function, x contains the values of b, and the result is @@ -1964,7 +1990,9 @@ func (Implementation) Dtpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, } // Dspmv performs the matrix-vector operation -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where A is an n×n symmetric matrix in packed format, x and y are vectors, // and alpha and beta are scalars. func (Implementation) Dspmv(ul blas.Uplo, n int, alpha float64, ap []float64, x []float64, incX int, beta float64, y []float64, incY int) { @@ -2131,7 +2159,9 @@ func (Implementation) Dspmv(ul blas.Uplo, n int, alpha float64, ap []float64, x } // Dspr performs the symmetric rank-one operation -// A += alpha * x * xᵀ +// +// A += alpha * x * xᵀ +// // where A is an n×n symmetric matrix in packed format, x is a vector, and // alpha is a scalar. 
func (Implementation) Dspr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, ap []float64) { @@ -2223,7 +2253,9 @@ func (Implementation) Dspr(ul blas.Uplo, n int, alpha float64, x []float64, incX } // Dspr2 performs the symmetric rank-2 update -// A += alpha * x * yᵀ + alpha * y * xᵀ +// +// A += alpha * x * yᵀ + alpha * y * xᵀ +// // where A is an n×n symmetric matrix in packed format, x and y are vectors, // and alpha is a scalar. func (Implementation) Dspr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, ap []float64) { diff --git a/blas/gonum/level3cmplx128.go b/blas/gonum/level3cmplx128.go index 3dfdffa7..bfff8c55 100644 --- a/blas/gonum/level3cmplx128.go +++ b/blas/gonum/level3cmplx128.go @@ -14,9 +14,13 @@ import ( var _ blas.Complex128Level3 = Implementation{} // Zgemm performs one of the matrix-matrix operations -// C = alpha * op(A) * op(B) + beta * C +// +// C = alpha * op(A) * op(B) + beta * C +// // where op(X) is one of -// op(X) = X or op(X) = Xᵀ or op(X) = Xᴴ, +// +// op(X) = X or op(X) = Xᵀ or op(X) = Xᴴ, +// // alpha and beta are scalars, and A, B and C are matrices, with op(A) an m×k matrix, // op(B) a k×n matrix and C an m×n matrix. func (Implementation) Zgemm(tA, tB blas.Transpose, m, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { @@ -258,8 +262,10 @@ func (Implementation) Zgemm(tA, tB blas.Transpose, m, n, k int, alpha complex128 } // Zhemm performs one of the matrix-matrix operations -// C = alpha*A*B + beta*C if side == blas.Left -// C = alpha*B*A + beta*C if side == blas.Right +// +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// // where alpha and beta are scalars, A is an m×m or n×n hermitian matrix and B // and C are m×n matrices. The imaginary parts of the diagonal elements of A are // assumed to be zero. @@ -405,8 +411,10 @@ func (Implementation) Zhemm(side blas.Side, uplo blas.Uplo, m, n int, alpha comp } // Zherk performs one of the hermitian rank-k operations -// C = alpha*A*Aᴴ + beta*C if trans == blas.NoTrans -// C = alpha*Aᴴ*A + beta*C if trans == blas.ConjTrans +// +// C = alpha*A*Aᴴ + beta*C if trans == blas.NoTrans +// C = alpha*Aᴴ*A + beta*C if trans == blas.ConjTrans +// // where alpha and beta are real scalars, C is an n×n hermitian matrix and A is // an n×k matrix in the first case and a k×n matrix in the second case. // @@ -603,8 +611,10 @@ func (Implementation) Zherk(uplo blas.Uplo, trans blas.Transpose, n, k int, alph } // Zher2k performs one of the hermitian rank-2k operations -// C = alpha*A*Bᴴ + conj(alpha)*B*Aᴴ + beta*C if trans == blas.NoTrans -// C = alpha*Aᴴ*B + conj(alpha)*Bᴴ*A + beta*C if trans == blas.ConjTrans +// +// C = alpha*A*Bᴴ + conj(alpha)*B*Aᴴ + beta*C if trans == blas.NoTrans +// C = alpha*Aᴴ*B + conj(alpha)*Bᴴ*A + beta*C if trans == blas.ConjTrans +// // where alpha and beta are scalars with beta real, C is an n×n hermitian matrix // and A and B are n×k matrices in the first case and k×n matrices in the second case. 
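A minimal sketch of the Hermitian rank-k update documented above, assuming the Zherk signature from this hunk; A is invented and only the upper triangle of C is written.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas"
		"gonum.org/v1/gonum/blas/gonum"
	)

	func main() {
		var impl gonum.Implementation

		// A is n×k = 2×2, row-major with lda = 2.
		a := []complex128{
			1, 1i,
			2, 0,
		}
		c := make([]complex128, 4) // 2×2 Hermitian C, ldc = 2

		// C = 1 * A * Aᴴ + 0 * C, writing only the upper triangle of C.
		impl.Zherk(blas.Upper, blas.NoTrans, 2, 2, 1, a, 2, 0, c, 2)
		fmt.Println(c)
		// expected: [(2+0i) (2+0i) (0+0i) (4+0i)]; the strictly lower
		// element c[2] is left untouched.
	}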
// @@ -799,8 +809,10 @@ func (Implementation) Zher2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alp } // Zsymm performs one of the matrix-matrix operations -// C = alpha*A*B + beta*C if side == blas.Left -// C = alpha*B*A + beta*C if side == blas.Right +// +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// // where alpha and beta are scalars, A is an m×m or n×n symmetric matrix and B // and C are m×n matrices. func (Implementation) Zsymm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { @@ -943,8 +955,10 @@ func (Implementation) Zsymm(side blas.Side, uplo blas.Uplo, m, n int, alpha comp } // Zsyrk performs one of the symmetric rank-k operations -// C = alpha*A*Aᵀ + beta*C if trans == blas.NoTrans -// C = alpha*Aᵀ*A + beta*C if trans == blas.Trans +// +// C = alpha*A*Aᵀ + beta*C if trans == blas.NoTrans +// C = alpha*Aᵀ*A + beta*C if trans == blas.Trans +// // where alpha and beta are scalars, C is an n×n symmetric matrix and A is // an n×k matrix in the first case and a k×n matrix in the second case. func (Implementation) Zsyrk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, beta complex128, c []complex128, ldc int) { @@ -1101,8 +1115,10 @@ func (Implementation) Zsyrk(uplo blas.Uplo, trans blas.Transpose, n, k int, alph } // Zsyr2k performs one of the symmetric rank-2k operations -// C = alpha*A*Bᵀ + alpha*B*Aᵀ + beta*C if trans == blas.NoTrans -// C = alpha*Aᵀ*B + alpha*Bᵀ*A + beta*C if trans == blas.Trans +// +// C = alpha*A*Bᵀ + alpha*B*Aᵀ + beta*C if trans == blas.NoTrans +// C = alpha*Aᵀ*B + alpha*Bᵀ*A + beta*C if trans == blas.Trans +// // where alpha and beta are scalars, C is an n×n symmetric matrix and A and B // are n×k matrices in the first case and k×n matrices in the second case. func (Implementation) Zsyr2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { @@ -1274,13 +1290,16 @@ func (Implementation) Zsyr2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alp } // Ztrmm performs one of the matrix-matrix operations -// B = alpha * op(A) * B if side == blas.Left, -// B = alpha * B * op(A) if side == blas.Right, +// +// B = alpha * op(A) * B if side == blas.Left, +// B = alpha * B * op(A) if side == blas.Right, +// // where alpha is a scalar, B is an m×n matrix, A is a unit, or non-unit, // upper or lower triangular matrix and op(A) is one of -// op(A) = A if trans == blas.NoTrans, -// op(A) = Aᵀ if trans == blas.Trans, -// op(A) = Aᴴ if trans == blas.ConjTrans. +// +// op(A) = A if trans == blas.NoTrans, +// op(A) = Aᵀ if trans == blas.Trans, +// op(A) = Aᴴ if trans == blas.ConjTrans. 
func (Implementation) Ztrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { na := m if side == blas.Right { @@ -1502,13 +1521,17 @@ func (Implementation) Ztrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose } // Ztrsm solves one of the matrix equations -// op(A) * X = alpha * B if side == blas.Left, -// X * op(A) = alpha * B if side == blas.Right, +// +// op(A) * X = alpha * B if side == blas.Left, +// X * op(A) = alpha * B if side == blas.Right, +// // where alpha is a scalar, X and B are m×n matrices, A is a unit or // non-unit, upper or lower triangular matrix and op(A) is one of -// op(A) = A if transA == blas.NoTrans, -// op(A) = Aᵀ if transA == blas.Trans, -// op(A) = Aᴴ if transA == blas.ConjTrans. +// +// op(A) = A if transA == blas.NoTrans, +// op(A) = Aᵀ if transA == blas.Trans, +// op(A) = Aᴴ if transA == blas.ConjTrans. +// // On return the matrix X is overwritten on B. func (Implementation) Ztrsm(side blas.Side, uplo blas.Uplo, transA blas.Transpose, diag blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { na := m diff --git a/blas/gonum/level3cmplx64.go b/blas/gonum/level3cmplx64.go index acda8489..b7fb5a2c 100644 --- a/blas/gonum/level3cmplx64.go +++ b/blas/gonum/level3cmplx64.go @@ -16,9 +16,13 @@ import ( var _ blas.Complex64Level3 = Implementation{} // Cgemm performs one of the matrix-matrix operations -// C = alpha * op(A) * op(B) + beta * C +// +// C = alpha * op(A) * op(B) + beta * C +// // where op(X) is one of -// op(X) = X or op(X) = Xᵀ or op(X) = Xᴴ, +// +// op(X) = X or op(X) = Xᵀ or op(X) = Xᴴ, +// // alpha and beta are scalars, and A, B and C are matrices, with op(A) an m×k matrix, // op(B) a k×n matrix and C an m×n matrix. // @@ -262,8 +266,10 @@ func (Implementation) Cgemm(tA, tB blas.Transpose, m, n, k int, alpha complex64, } // Chemm performs one of the matrix-matrix operations -// C = alpha*A*B + beta*C if side == blas.Left -// C = alpha*B*A + beta*C if side == blas.Right +// +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// // where alpha and beta are scalars, A is an m×m or n×n hermitian matrix and B // and C are m×n matrices. The imaginary parts of the diagonal elements of A are // assumed to be zero. @@ -411,8 +417,10 @@ func (Implementation) Chemm(side blas.Side, uplo blas.Uplo, m, n int, alpha comp } // Cherk performs one of the hermitian rank-k operations -// C = alpha*A*Aᴴ + beta*C if trans == blas.NoTrans -// C = alpha*Aᴴ*A + beta*C if trans == blas.ConjTrans +// +// C = alpha*A*Aᴴ + beta*C if trans == blas.NoTrans +// C = alpha*Aᴴ*A + beta*C if trans == blas.ConjTrans +// // where alpha and beta are real scalars, C is an n×n hermitian matrix and A is // an n×k matrix in the first case and a k×n matrix in the second case. // @@ -611,8 +619,10 @@ func (Implementation) Cherk(uplo blas.Uplo, trans blas.Transpose, n, k int, alph } // Cher2k performs one of the hermitian rank-2k operations -// C = alpha*A*Bᴴ + conj(alpha)*B*Aᴴ + beta*C if trans == blas.NoTrans -// C = alpha*Aᴴ*B + conj(alpha)*Bᴴ*A + beta*C if trans == blas.ConjTrans +// +// C = alpha*A*Bᴴ + conj(alpha)*B*Aᴴ + beta*C if trans == blas.NoTrans +// C = alpha*Aᴴ*B + conj(alpha)*Bᴴ*A + beta*C if trans == blas.ConjTrans +// // where alpha and beta are scalars with beta real, C is an n×n hermitian matrix // and A and B are n×k matrices in the first case and k×n matrices in the second case. 
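How op(X) is selected through the Transpose arguments is clearest in a small sketch, assuming the Cgemm signature from this file; with tA == blas.ConjTrans the stored A is k×m and the routine applies Aᴴ. The values are invented.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas"
		"gonum.org/v1/gonum/blas/gonum"
	)

	func main() {
		var impl gonum.Implementation

		// With tA == blas.ConjTrans, op(A) = Aᴴ, so A is stored as a k×m = 2×2 matrix.
		a := []complex64{
			1i, 0,
			0, 2,
		}
		b := []complex64{ // 2×2 identity
			1, 0,
			0, 1,
		}
		c := make([]complex64, 4)

		// C = 1 * Aᴴ * B + 0 * C
		impl.Cgemm(blas.ConjTrans, blas.NoTrans, 2, 2, 2, 1, a, 2, b, 2, 0, c, 2)
		fmt.Println(c) // expected: [(0-1i) (0+0i) (0+0i) (2+0i)]
	}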
// @@ -809,8 +819,10 @@ func (Implementation) Cher2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alp } // Csymm performs one of the matrix-matrix operations -// C = alpha*A*B + beta*C if side == blas.Left -// C = alpha*B*A + beta*C if side == blas.Right +// +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// // where alpha and beta are scalars, A is an m×m or n×n symmetric matrix and B // and C are m×n matrices. // @@ -955,8 +967,10 @@ func (Implementation) Csymm(side blas.Side, uplo blas.Uplo, m, n int, alpha comp } // Csyrk performs one of the symmetric rank-k operations -// C = alpha*A*Aᵀ + beta*C if trans == blas.NoTrans -// C = alpha*Aᵀ*A + beta*C if trans == blas.Trans +// +// C = alpha*A*Aᵀ + beta*C if trans == blas.NoTrans +// C = alpha*Aᵀ*A + beta*C if trans == blas.Trans +// // where alpha and beta are scalars, C is an n×n symmetric matrix and A is // an n×k matrix in the first case and a k×n matrix in the second case. // @@ -1115,8 +1129,10 @@ func (Implementation) Csyrk(uplo blas.Uplo, trans blas.Transpose, n, k int, alph } // Csyr2k performs one of the symmetric rank-2k operations -// C = alpha*A*Bᵀ + alpha*B*Aᵀ + beta*C if trans == blas.NoTrans -// C = alpha*Aᵀ*B + alpha*Bᵀ*A + beta*C if trans == blas.Trans +// +// C = alpha*A*Bᵀ + alpha*B*Aᵀ + beta*C if trans == blas.NoTrans +// C = alpha*Aᵀ*B + alpha*Bᵀ*A + beta*C if trans == blas.Trans +// // where alpha and beta are scalars, C is an n×n symmetric matrix and A and B // are n×k matrices in the first case and k×n matrices in the second case. // @@ -1290,13 +1306,16 @@ func (Implementation) Csyr2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alp } // Ctrmm performs one of the matrix-matrix operations -// B = alpha * op(A) * B if side == blas.Left, -// B = alpha * B * op(A) if side == blas.Right, +// +// B = alpha * op(A) * B if side == blas.Left, +// B = alpha * B * op(A) if side == blas.Right, +// // where alpha is a scalar, B is an m×n matrix, A is a unit, or non-unit, // upper or lower triangular matrix and op(A) is one of -// op(A) = A if trans == blas.NoTrans, -// op(A) = Aᵀ if trans == blas.Trans, -// op(A) = Aᴴ if trans == blas.ConjTrans. +// +// op(A) = A if trans == blas.NoTrans, +// op(A) = Aᵀ if trans == blas.Trans, +// op(A) = Aᴴ if trans == blas.ConjTrans. // // Complex64 implementations are autogenerated and not directly tested. func (Implementation) Ctrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) { @@ -1520,13 +1539,17 @@ func (Implementation) Ctrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose } // Ctrsm solves one of the matrix equations -// op(A) * X = alpha * B if side == blas.Left, -// X * op(A) = alpha * B if side == blas.Right, +// +// op(A) * X = alpha * B if side == blas.Left, +// X * op(A) = alpha * B if side == blas.Right, +// // where alpha is a scalar, X and B are m×n matrices, A is a unit or // non-unit, upper or lower triangular matrix and op(A) is one of -// op(A) = A if transA == blas.NoTrans, -// op(A) = Aᵀ if transA == blas.Trans, -// op(A) = Aᴴ if transA == blas.ConjTrans. +// +// op(A) = A if transA == blas.NoTrans, +// op(A) = Aᵀ if transA == blas.Trans, +// op(A) = Aᴴ if transA == blas.ConjTrans. +// // On return the matrix X is overwritten on B. // // Complex64 implementations are autogenerated and not directly tested. 
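Editorial note: the triangular-solve routines above (Ztrsm and its complex64 twin Ctrsm) overwrite the right-hand side B with the solution X. A minimal sketch of a call, using only the gonum.Implementation methods and blas constants that appear in these hunks, is shown below; the 2×2 system and the expected output are illustrative values, not drawn from the patch.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas"
		"gonum.org/v1/gonum/blas/gonum"
	)

	func main() {
		var impl gonum.Implementation

		// Upper-triangular A (2×2, row-major) and right-hand side B (2×1).
		a := []complex128{
			2, 1,
			0, 4,
		}
		b := []complex128{
			3,
			8,
		}

		// Solve A * X = 1 * B; the solution X overwrites B.
		impl.Ztrsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, 2, 1, 1, a, 2, b, 1)

		fmt.Println(b) // [(0.5+0i) (2+0i)]
	}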
diff --git a/blas/gonum/level3float32.go b/blas/gonum/level3float32.go index 2760039d..4b813fbc 100644 --- a/blas/gonum/level3float32.go +++ b/blas/gonum/level3float32.go @@ -14,10 +14,12 @@ import ( var _ blas.Float32Level3 = Implementation{} // Strsm solves one of the matrix equations -// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left -// Aᵀ * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right -// X * Aᵀ = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// +// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left +// Aᵀ * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right +// X * Aᵀ = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// // where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a // scalar. // @@ -219,8 +221,10 @@ func (Implementation) Strsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas } // Ssymm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C if side == blas.Left -// C = alpha * B * A + beta * C if side == blas.Right +// +// C = alpha * A * B + beta * C if side == blas.Left +// C = alpha * B * A + beta * C if side == blas.Right +// // where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha // is a scalar. // @@ -364,8 +368,10 @@ func (Implementation) Ssymm(s blas.Side, ul blas.Uplo, m, n int, alpha float32, } // Ssyrk performs one of the symmetric rank-k operations -// C = alpha * A * Aᵀ + beta * C if tA == blas.NoTrans -// C = alpha * Aᵀ * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// +// C = alpha * A * Aᵀ + beta * C if tA == blas.NoTrans +// C = alpha * Aᵀ * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// // where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and // beta are scalars. // @@ -516,8 +522,10 @@ func (Implementation) Ssyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha flo } // Ssyr2k performs one of the symmetric rank 2k operations -// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if tA == blas.NoTrans -// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// +// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if tA == blas.NoTrans +// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// // where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and // alpha and beta are scalars. // @@ -717,10 +725,12 @@ func (Implementation) Ssyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha fl } // Strmm performs one of the matrix-matrix operations -// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left -// B = alpha * Aᵀ * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right -// B = alpha * B * Aᵀ if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// +// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left +// B = alpha * Aᵀ * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right +// B = alpha * B * Aᵀ if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// // where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar. 
// // Float32 implementations are autogenerated and not directly tested. diff --git a/blas/gonum/level3float64.go b/blas/gonum/level3float64.go index 429b3a16..0d203513 100644 --- a/blas/gonum/level3float64.go +++ b/blas/gonum/level3float64.go @@ -12,10 +12,12 @@ import ( var _ blas.Float64Level3 = Implementation{} // Dtrsm solves one of the matrix equations -// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left -// Aᵀ * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right -// X * Aᵀ = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// +// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left +// Aᵀ * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right +// X * Aᵀ = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// // where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a // scalar. // @@ -215,8 +217,10 @@ func (Implementation) Dtrsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas } // Dsymm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C if side == blas.Left -// C = alpha * B * A + beta * C if side == blas.Right +// +// C = alpha * A * B + beta * C if side == blas.Left +// C = alpha * B * A + beta * C if side == blas.Right +// // where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha // is a scalar. func (Implementation) Dsymm(s blas.Side, ul blas.Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { @@ -358,8 +362,10 @@ func (Implementation) Dsymm(s blas.Side, ul blas.Uplo, m, n int, alpha float64, } // Dsyrk performs one of the symmetric rank-k operations -// C = alpha * A * Aᵀ + beta * C if tA == blas.NoTrans -// C = alpha * Aᵀ * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// +// C = alpha * A * Aᵀ + beta * C if tA == blas.NoTrans +// C = alpha * Aᵀ * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// // where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and // beta are scalars. func (Implementation) Dsyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) { @@ -508,8 +514,10 @@ func (Implementation) Dsyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha flo } // Dsyr2k performs one of the symmetric rank 2k operations -// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if tA == blas.NoTrans -// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// +// C = alpha * A * Bᵀ + alpha * B * Aᵀ + beta * C if tA == blas.NoTrans +// C = alpha * Aᵀ * B + alpha * Bᵀ * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// // where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and // alpha and beta are scalars. 
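Editorial note: a small worked call of the symmetric rank-2k update documented just above may help; this is a sketch assuming the Dsyr2k signature shown in the next hunk, with n=2, k=1 and illustrative A, B and C (only the upper triangle of C is referenced).

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas"
		"gonum.org/v1/gonum/blas/gonum"
	)

	func main() {
		var impl gonum.Implementation

		// A and B are 2×1 column vectors (n=2, k=1); C is 2×2 symmetric.
		a := []float64{1, 2}
		b := []float64{3, 4}
		c := make([]float64, 4)

		// C = 1*A*Bᵀ + 1*B*Aᵀ + 0*C, upper triangle stored.
		impl.Dsyr2k(blas.Upper, blas.NoTrans, 2, 1, 1, a, 1, b, 1, 0, c, 2)

		fmt.Println(c) // [6 10 0 16]; the lower-triangle element c[2] is untouched.
	}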
func (Implementation) Dsyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { @@ -707,10 +715,12 @@ func (Implementation) Dsyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha fl } // Dtrmm performs one of the matrix-matrix operations -// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left -// B = alpha * Aᵀ * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right -// B = alpha * B * Aᵀ if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// +// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left +// B = alpha * Aᵀ * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right +// B = alpha * B * Aᵀ if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// // where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar. func (Implementation) Dtrmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { if s != blas.Left && s != blas.Right { diff --git a/blas/gonum/sgemm.go b/blas/gonum/sgemm.go index 7514c6c3..7b03ce46 100644 --- a/blas/gonum/sgemm.go +++ b/blas/gonum/sgemm.go @@ -15,10 +15,12 @@ import ( ) // Sgemm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C -// C = alpha * Aᵀ * B + beta * C -// C = alpha * A * Bᵀ + beta * C -// C = alpha * Aᵀ * Bᵀ + beta * C +// +// C = alpha * A * B + beta * C +// C = alpha * Aᵀ * B + beta * C +// C = alpha * A * Bᵀ + beta * C +// C = alpha * Aᵀ * Bᵀ + beta * C +// // where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is // an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or // B are transposed. diff --git a/blas/testblas/common.go b/blas/testblas/common.go index 258315f4..7d627b81 100644 --- a/blas/testblas/common.go +++ b/blas/testblas/common.go @@ -546,9 +546,13 @@ func rndComplex128(rnd *rand.Rand) complex128 { } // zmm returns the result of one of the matrix-matrix operations -// alpha * op(A) * op(B) + beta * C +// +// alpha * op(A) * op(B) + beta * C +// // where op(X) is one of -// op(X) = X or op(X) = Xᵀ or op(X) = Xᴴ, +// +// op(X) = X or op(X) = Xᵀ or op(X) = Xᴴ, +// // alpha and beta are scalars, and A, B and C are matrices, with op(A) an m×k matrix, // op(B) a k×n matrix and C an m×n matrix. // diff --git a/cmplxs/cmplxs.go b/cmplxs/cmplxs.go index 46f3c05e..e9f4f42a 100644 --- a/cmplxs/cmplxs.go +++ b/cmplxs/cmplxs.go @@ -117,7 +117,9 @@ func Complex(dst []complex128, real, imag []float64) []complex128 { // CumProd finds the cumulative product of elements of s and store it in // place into dst so that -// dst[i] = s[i] * s[i-1] * s[i-2] * ... * s[0] +// +// dst[i] = s[i] * s[i-1] * s[i-2] * ... * s[0] +// // It panics if the argument lengths do not match. func CumProd(dst, s []complex128) []complex128 { if len(dst) != len(s) { @@ -131,7 +133,9 @@ func CumProd(dst, s []complex128) []complex128 { // CumSum finds the cumulative sum of elements of s and stores it in place // into dst so that -// dst[i] = s[i] + s[i-1] + s[i-2] + ... + s[0] +// +// dst[i] = s[i] + s[i-1] + s[i-2] + ... + s[0] +// // It panics if the argument lengths do not match. 
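Editorial note: the cmplxs.CumSum and cmplxs.CumProd docs above describe in-place cumulative reductions written into dst. A short sketch with illustrative inputs (not taken from the patch) follows.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/cmplxs"
	)

	func main() {
		s := []complex128{1 + 1i, 2, 3i}
		dst := make([]complex128, len(s))

		// dst[i] = s[0] + s[1] + ... + s[i].
		cmplxs.CumSum(dst, s)
		fmt.Println(dst) // [(1+1i) (3+1i) (3+4i)]

		// dst[i] = s[0] * s[1] * ... * s[i].
		cmplxs.CumProd(dst, s)
		fmt.Println(dst) // [(1+1i) (2+2i) (-6+6i)]
	}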
func CumSum(dst, s []complex128) []complex128 { if len(dst) != len(s) { @@ -348,7 +352,7 @@ func Imag(dst []float64, src []complex128) []float64 { // will return all zeros if l or u is zero. // Also returns the mutated slice dst, so that it can be used in range, like: // -// for i, x := range LogSpan(dst, l, u) { ... } +// for i, x := range LogSpan(dst, l, u) { ... } func LogSpan(dst []complex128, l, u complex128) []complex128 { Span(dst, cmplx.Log(l), cmplx.Log(u)) for i := range dst { @@ -627,7 +631,7 @@ func ScaleTo(dst []complex128, c complex128, s []complex128) []complex128 { // Span also returns the mutated slice dst, so that it can be used in range expressions, // like: // -// for i, x := range Span(dst, l, u) { ... } +// for i, x := range Span(dst, l, u) { ... } func Span(dst []complex128, l, u complex128) []complex128 { n := len(dst) if n < 2 { diff --git a/cmplxs/cscalar/cscalar.go b/cmplxs/cscalar/cscalar.go index 3c9e1bfe..e377372c 100644 --- a/cmplxs/cscalar/cscalar.go +++ b/cmplxs/cscalar/cscalar.go @@ -23,7 +23,8 @@ const minNormalFloat64 = 0x1p-1022 // EqualWithinRel returns true when the difference between a and b // is not greater than tol times the greater absolute value of a and b, -// abs(a-b) <= tol * max(abs(a), abs(b)). +// +// abs(a-b) <= tol * max(abs(a), abs(b)). func EqualWithinRel(a, b complex128, tol float64) bool { if a == b { return true @@ -61,9 +62,10 @@ func ParseWithNA(s, missing string) (value complex128, weight float64, err error // Round returns the half away from zero rounded value of x with prec precision. // // Special cases are: -// Round(±0) = +0 -// Round(±Inf) = ±Inf -// Round(NaN) = NaN +// +// Round(±0) = +0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN func Round(x complex128, prec int) complex128 { if x == 0 { // Make sure zero is returned @@ -76,9 +78,10 @@ func Round(x complex128, prec int) complex128 { // RoundEven returns the half even rounded value of x with prec precision. // // Special cases are: -// RoundEven(±0) = +0 -// RoundEven(±Inf) = ±Inf -// RoundEven(NaN) = NaN +// +// RoundEven(±0) = +0 +// RoundEven(±Inf) = ±Inf +// RoundEven(NaN) = NaN func RoundEven(x complex128, prec int) complex128 { if x == 0 { // Make sure zero is returned diff --git a/diff/fd/crosslaplacian.go b/diff/fd/crosslaplacian.go index 22f2e727..96643d0d 100644 --- a/diff/fd/crosslaplacian.go +++ b/diff/fd/crosslaplacian.go @@ -12,7 +12,9 @@ import ( // CrossLaplacian computes a Laplacian-like quantity for a function of two vectors // at the locations x and y. // It computes -// ∇_y · ∇_x f(x,y) = \sum_i ∂^2 f(x,y)/∂x_i ∂y_i +// +// ∇_y · ∇_x f(x,y) = \sum_i ∂^2 f(x,y)/∂x_i ∂y_i +// // The two input vector lengths must be the same. // // Finite difference formula and other options are specified by settings. If diff --git a/diff/fd/diff.go b/diff/fd/diff.go index c5499aa9..a0496e8c 100644 --- a/diff/fd/diff.go +++ b/diff/fd/diff.go @@ -17,7 +17,9 @@ type Point struct { // Formula represents a finite difference formula on a regularly spaced grid // that approximates the derivative of order k of a function f at x as -// d^k f(x) ≈ (1 / Step^k) * \sum_i Coeff_i * f(x + Step * Loc_i). +// +// d^k f(x) ≈ (1 / Step^k) * \sum_i Coeff_i * f(x + Step * Loc_i). +// // Step must be positive, or the finite difference formula will panic. 
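Editorial note: the Formula doc above defines the approximation d^k f(x) ≈ (1/Step^k) Σ_i Coeff_i f(x + Step*Loc_i). The sketch below exercises it through the package's Derivative helper and the predefined Central formula, both part of diff/fd but not shown in this patch; the call and expected value are illustrative.

	package main

	import (
		"fmt"
		"math"

		"gonum.org/v1/gonum/diff/fd"
	)

	func main() {
		// d/dx sin(x) at x = 0 is cos(0) = 1.
		df := fd.Derivative(math.Sin, 0, &fd.Settings{
			Formula: fd.Central, // central first-derivative stencil
		})
		fmt.Println(df) // ≈ 1
	}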
type Formula struct { // Stencil is the set of sampling Points which are used to estimate the diff --git a/diff/fd/hessian.go b/diff/fd/hessian.go index 80616b2b..d053bf13 100644 --- a/diff/fd/hessian.go +++ b/diff/fd/hessian.go @@ -13,7 +13,9 @@ import ( // Hessian approximates the Hessian matrix of the multivariate function f at // the location x. That is -// H_{i,j} = ∂^2 f(x)/∂x_i ∂x_j +// +// H_{i,j} = ∂^2 f(x)/∂x_i ∂x_j +// // The resulting H will be stored in dst. Finite difference formula and other // options are specified by settings. If settings is nil, the Hessian will be // estimated using the Forward formula and a default step size. diff --git a/diff/fd/jacobian.go b/diff/fd/jacobian.go index 88112563..077c7c2b 100644 --- a/diff/fd/jacobian.go +++ b/diff/fd/jacobian.go @@ -28,13 +28,16 @@ type JacobianSettings struct { // The Jacobian matrix J is the matrix of all first-order partial derivatives of f. // If f maps an n-dimensional vector x to an m-dimensional vector y = f(x), J is // an m×n matrix whose elements are given as -// J_{i,j} = ∂f_i/∂x_j, +// +// J_{i,j} = ∂f_i/∂x_j, +// // or expanded out -// [ ∂f_1/∂x_1 ... ∂f_1/∂x_n ] -// [ . . . ] -// J = [ . . . ] -// [ . . . ] -// [ ∂f_m/∂x_1 ... ∂f_m/∂x_n ] +// +// [ ∂f_1/∂x_1 ... ∂f_1/∂x_n ] +// [ . . . ] +// J = [ . . . ] +// [ . . . ] +// [ ∂f_m/∂x_1 ... ∂f_m/∂x_n ] // // dst must be non-nil, the number of its columns must equal the length of x, and // the derivative order of the formula must be 1, otherwise Jacobian will panic. diff --git a/diff/fd/laplacian.go b/diff/fd/laplacian.go index 5f1a27b0..7f77b0b3 100644 --- a/diff/fd/laplacian.go +++ b/diff/fd/laplacian.go @@ -8,7 +8,9 @@ import "sync" // Laplacian computes the Laplacian of the multivariate function f at the location // x. That is, Laplacian returns -// ∆ f(x) = ∇ · ∇ f(x) = \sum_i ∂^2 f(x)/∂x_i^2 +// +// ∆ f(x) = ∇ · ∇ f(x) = \sum_i ∂^2 f(x)/∂x_i^2 +// // The finite difference formula and other options are specified by settings. // The order of the difference formula must be 2 or Laplacian will panic. func Laplacian(f func(x []float64) float64, x []float64, settings *Settings) float64 { diff --git a/diff/fd/watson_test.go b/diff/fd/watson_test.go index b115528c..b3c261d3 100644 --- a/diff/fd/watson_test.go +++ b/diff/fd/watson_test.go @@ -14,10 +14,10 @@ import "gonum.org/v1/gonum/mat" // derivative methods. // // References: -// - Kowalik, J.S., Osborne, M.R.: Methods for Unconstrained Optimization -// Problems. Elsevier North-Holland, New York, 1968 -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Kowalik, J.S., Osborne, M.R.: Methods for Unconstrained Optimization +// Problems. Elsevier North-Holland, New York, 1968 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type Watson struct{} func (Watson) Func(x []float64) (sum float64) { diff --git a/dsp/window/cmd/leakage/leakage.go b/dsp/window/cmd/leakage/leakage.go index 9fea8d74..23a1bc67 100644 --- a/dsp/window/cmd/leakage/leakage.go +++ b/dsp/window/cmd/leakage/leakage.go @@ -7,31 +7,33 @@ // to be used to verify window behaviour against foreign implementations. 
// For example, the behavior of a NumPy window can be captured using this // python code: -// import matplotlib.pyplot as plt -// import numpy as np -// from numpy.fft import fft // -// window = np.blackman(20) -// print("# beta = %f" % np.mean(window)) +// import matplotlib.pyplot as plt +// import numpy as np +// from numpy.fft import fft // -// plt.figure() +// window = np.blackman(20) +// print("# beta = %f" % np.mean(window)) // -// A = fft(window, 1000) -// mag = np.abs(A) -// with np.errstate(divide='ignore', invalid='ignore'): -// mag = 20 * np.log10(mag) -// mag -= max(mag) -// freq = np.linspace(0, len(window)/2, len(A)/2) +// plt.figure() // -// for m in mag[:len(mag)/2]: -// print(m) +// A = fft(window, 1000) +// mag = np.abs(A) +// with np.errstate(divide='ignore', invalid='ignore'): +// mag = 20 * np.log10(mag) +// mag -= max(mag) +// freq = np.linspace(0, len(window)/2, len(A)/2) // -// plt.plot(freq, mag[:len(mag)/2]) -// plt.title("Spectral leakage") -// plt.ylabel("Amplitude (dB)") -// plt.xlabel("DFT bin") +// for m in mag[:len(mag)/2]: +// print(m) +// +// plt.plot(freq, mag[:len(mag)/2]) +// plt.title("Spectral leakage") +// plt.ylabel("Amplitude (dB)") +// plt.xlabel("DFT bin") +// +// plt.show() // -// plt.show() // and then be exported to leakage and compared with the Gonum // implementation. package main diff --git a/dsp/window/doc.go b/dsp/window/doc.go index 5c1eddf7..bcb2b280 100644 --- a/dsp/window/doc.go +++ b/dsp/window/doc.go @@ -9,7 +9,7 @@ // when performing a Fourier transform on a signal of limited length. // See https://en.wikipedia.org/wiki/Window_function for more details. // -// Spectral leakage parameters +// # Spectral leakage parameters // // Application of window functions to an input will result in changes // to the frequency content of the signal in an effect called spectral @@ -23,7 +23,8 @@ // constant component of the spectrum resulting from use of the window // compared to that produced using the rectangular window, expressed in // a logarithmic scale. -// β_w = 20 log10(A_w / A_rect) dB +// +// β_w = 20 log10(A_w / A_rect) dB // // The ΔF_0 parameter describes the normalized width of the main lobe of // the frequency spectrum at zero amplitude. @@ -34,7 +35,9 @@ // The K parameter describes the relative width of the main lobe of the // frequency spectrum produced by the window compared with the rectangular // window. The rectangular window has the lowest ΔF_0 at a value of 2. -// K_w = ΔF_0_w/ΔF_0_rect. +// +// K_w = ΔF_0_w/ΔF_0_rect. +// // The value of K divides windows into high resolution windows (K≤3) and // low resolution windows (K>3). // diff --git a/dsp/window/window.go b/dsp/window/window.go index 92e8fdea..3acdfacf 100644 --- a/dsp/window/window.go +++ b/dsp/window/window.go @@ -16,7 +16,9 @@ import "math" // limited length sequence of values without any modification. // // The sequence weights are -// w[k] = 1, +// +// w[k] = 1, +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 2, ΔF_0.5 = 0.89, K = 1, ɣ_max = -13, β = 0. @@ -31,7 +33,9 @@ func Rectangular(seq []float64) []float64 { // Sine window is a high-resolution window. // // The sequence weights are -// w[k] = sin(π*k/(N-1)), +// +// w[k] = sin(π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 3, ΔF_0.5 = 1.23, K = 1.5, ɣ_max = -23, β = -3.93. @@ -50,7 +54,9 @@ func Sine(seq []float64) []float64 { // The Lanczos window is a high-resolution window. 
// // The sequence weights are -// w[k] = sinc(2*k/(N-1) - 1), +// +// w[k] = sinc(2*k/(N-1) - 1), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 3.24, ΔF_0.5 = 1.3, K = 1.62, ɣ_max = -26.4, β = -4.6. @@ -75,7 +81,9 @@ func Lanczos(seq []float64) []float64 { // The Triangular window is a high-resolution window. // // The sequence weights are -// w[k] = 1 - |k/A -1|, A=(N-1)/2, +// +// w[k] = 1 - |k/A -1|, A=(N-1)/2, +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 4, ΔF_0.5 = 1.33, K = 2, ɣ_max = -26.5, β = -6. @@ -94,7 +102,9 @@ func Triangular(seq []float64) []float64 { // The Hann window is a high-resolution window. // // The sequence weights are -// w[k] = 0.5*(1 - cos(2*π*k/(N-1))), +// +// w[k] = 0.5*(1 - cos(2*π*k/(N-1))), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 4, ΔF_0.5 = 1.5, K = 2, ɣ_max = -31.5, β = -6. @@ -114,7 +124,9 @@ func Hann(seq []float64) []float64 { // The Bartlett-Hann window is a high-resolution window. // // The sequence weights are -// w[k] = 0.62 - 0.48*|k/(N-1)-0.5| - 0.38*cos(2*π*k/(N-1)), +// +// w[k] = 0.62 - 0.48*|k/(N-1)-0.5| - 0.38*cos(2*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 4, ΔF_0.5 = 1.45, K = 2, ɣ_max = -35.9, β = -6. @@ -140,7 +152,9 @@ func BartlettHann(seq []float64) []float64 { // the highest ɣ_max. // // The sequence weights are -// w[k] = 25/46 - 21/46 * cos(2*π*k/(N-1)), +// +// w[k] = 25/46 - 21/46 * cos(2*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 4, ΔF_0.5 = 1.33, K = 2, ɣ_max = -42, β = -5.37. @@ -165,7 +179,9 @@ func Hamming(seq []float64) []float64 { // The Blackman window is a high-resolution window. // // The sequence weights are -// w[k] = 0.42 - 0.5*cos(2*π*k/(N-1)) + 0.08*cos(4*π*k/(N-1)), +// +// w[k] = 0.42 - 0.5*cos(2*π*k/(N-1)) + 0.08*cos(4*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 6, ΔF_0.5 = 1.7, K = 3, ɣ_max = -58, β = -7.54. @@ -192,8 +208,10 @@ func Blackman(seq []float64) []float64 { // The Blackman-Harris window is a low-resolution window. // // The sequence weights are -// w[k] = 0.35875 - 0.48829*cos(2*π*k/(N-1)) + -// 0.14128*cos(4*π*k/(N-1)) - 0.01168*cos(6*π*k/(N-1)), +// +// w[k] = 0.35875 - 0.48829*cos(2*π*k/(N-1)) + +// 0.14128*cos(4*π*k/(N-1)) - 0.01168*cos(6*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 8, ΔF_0.5 = 1.97, K = 4, ɣ_max = -92, β = -8.91. @@ -220,8 +238,10 @@ func BlackmanHarris(seq []float64) []float64 { // The Nuttall window is a low-resolution window. // // The sequence weights are -// w[k] = 0.355768 - 0.487396*cos(2*π*k/(N-1)) + 0.144232*cos(4*π*k/(N-1)) - -// 0.012604*cos(6*π*k/(N-1)), +// +// w[k] = 0.355768 - 0.487396*cos(2*π*k/(N-1)) + 0.144232*cos(4*π*k/(N-1)) - +// 0.012604*cos(6*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 8, ΔF_0.5 = 1.98, K = 4, ɣ_max = -93, β = -9. @@ -249,8 +269,10 @@ func Nuttall(seq []float64) []float64 { // The Blackman-Nuttall window is a low-resolution window. 
// // The sequence weights are -// w[k] = 0.3635819 - 0.4891775*cos(2*π*k/(N-1)) + 0.1365995*cos(4*π*k/(N-1)) - -// 0.0106411*cos(6*π*k/(N-1)), +// +// w[k] = 0.3635819 - 0.4891775*cos(2*π*k/(N-1)) + 0.1365995*cos(4*π*k/(N-1)) - +// 0.0106411*cos(6*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 8, ΔF_0.5 = 1.94, K = 4, ɣ_max = -98, β = -8.8. @@ -278,9 +300,11 @@ func BlackmanNuttall(seq []float64) []float64 { // The Flat Top window is a low-resolution window. // // The sequence weights are -// w[k] = 0.21557895 - 0.41663158*cos(2*π*k/(N-1)) + -// 0.277263158*cos(4*π*k/(N-1)) - 0.083578947*cos(6*π*k/(N-1)) + -// 0.006947368*cos(4*π*k/(N-1)), +// +// w[k] = 0.21557895 - 0.41663158*cos(2*π*k/(N-1)) + +// 0.277263158*cos(4*π*k/(N-1)) - 0.083578947*cos(6*π*k/(N-1)) + +// 0.006947368*cos(4*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 10, ΔF_0.5 = 3.72, K = 5, ɣ_max = -93.0, β = -13.34. diff --git a/dsp/window/window_complex.go b/dsp/window/window_complex.go index c1909290..404d5232 100644 --- a/dsp/window/window_complex.go +++ b/dsp/window/window_complex.go @@ -16,7 +16,9 @@ import "math" // limited length sequence of values without any modification. // // The sequence weights are -// w[k] = 1, +// +// w[k] = 1, +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 2, ΔF_0.5 = 0.89, K = 1, ɣ_max = -13, β = 0. @@ -32,7 +34,9 @@ func RectangularComplex(seq []complex128) []complex128 { // Sine window is a high-resolution window. // // The sequence weights are -// w[k] = sin(π*k/(N-1)), +// +// w[k] = sin(π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 3, ΔF_0.5 = 1.23, K = 1.5, ɣ_max = -23, β = -3.93. @@ -53,7 +57,9 @@ func SineComplex(seq []complex128) []complex128 { // The Lanczos window is a high-resolution window. // // The sequence weights are -// w[k] = sinc(2*k/(N-1) - 1), +// +// w[k] = sinc(2*k/(N-1) - 1), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 3.24, ΔF_0.5 = 1.3, K = 1.62, ɣ_max = -26.4, β = -4.6. @@ -79,7 +85,9 @@ func LanczosComplex(seq []complex128) []complex128 { // The Triangular window is a high-resolution window. // // The sequence weights are -// w[k] = 1 - |k/A -1|, A=(N-1)/2, +// +// w[k] = 1 - |k/A -1|, A=(N-1)/2, +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 4, ΔF_0.5 = 1.33, K = 2, ɣ_max = -26.5, β = -6. @@ -99,7 +107,9 @@ func TriangularComplex(seq []complex128) []complex128 { // The Hann window is a high-resolution window. // // The sequence weights are -// w[k] = 0.5*(1 - cos(2*π*k/(N-1))), +// +// w[k] = 0.5*(1 - cos(2*π*k/(N-1))), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 4, ΔF_0.5 = 1.5, K = 2, ɣ_max = -31.5, β = -6. @@ -120,7 +130,9 @@ func HannComplex(seq []complex128) []complex128 { // The Bartlett-Hann window is a high-resolution window. // // The sequence weights are -// w[k] = 0.62 - 0.48*|k/(N-1)-0.5| - 0.38*cos(2*π*k/(N-1)), +// +// w[k] = 0.62 - 0.48*|k/(N-1)-0.5| - 0.38*cos(2*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 4, ΔF_0.5 = 1.45, K = 2, ɣ_max = -35.9, β = -6. 
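Editorial note: each of the fixed windows documented in window.go applies its weights to the sequence in place and returns the slice. A minimal sketch with an illustrative all-ones input, using the Hann function from the hunks above:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/dsp/window"
	)

	func main() {
		seq := []float64{1, 1, 1, 1, 1, 1, 1, 1}

		// Hann modifies seq in place and returns it; each element becomes
		// w[k] = 0.5*(1 - cos(2*π*k/(N-1))) for the all-ones input.
		window.Hann(seq)
		fmt.Println(seq)
	}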
@@ -148,7 +160,9 @@ func BartlettHannComplex(seq []complex128) []complex128 { // the highest ɣ_max. // // The sequence weights are -// w[k] = 25/46 - 21/46 * cos(2*π*k/(N-1)), +// +// w[k] = 25/46 - 21/46 * cos(2*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 4, ΔF_0.5 = 1.33, K = 2, ɣ_max = -42, β = -5.37. @@ -174,7 +188,9 @@ func HammingComplex(seq []complex128) []complex128 { // The Blackman window is a high-resolution window. // // The sequence weights are -// w[k] = 0.42 - 0.5*cos(2*π*k/(N-1)) + 0.08*cos(4*π*k/(N-1)), +// +// w[k] = 0.42 - 0.5*cos(2*π*k/(N-1)) + 0.08*cos(4*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 6, ΔF_0.5 = 1.7, K = 3, ɣ_max = -58, β = -7.54. @@ -202,8 +218,10 @@ func BlackmanComplex(seq []complex128) []complex128 { // The Blackman-Harris window is a low-resolution window. // // The sequence weights are -// w[k] = 0.35875 - 0.48829*cos(2*π*k/(N-1)) + -// 0.14128*cos(4*π*k/(N-1)) - 0.01168*cos(6*π*k/(N-1)), +// +// w[k] = 0.35875 - 0.48829*cos(2*π*k/(N-1)) + +// 0.14128*cos(4*π*k/(N-1)) - 0.01168*cos(6*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 8, ΔF_0.5 = 1.97, K = 4, ɣ_max = -92, β = -8.91. @@ -232,8 +250,10 @@ func BlackmanHarrisComplex(seq []complex128) []complex128 { // The Nuttall window is a low-resolution window. // // The sequence weights are -// w[k] = 0.355768 - 0.487396*cos(2*π*k/(N-1)) + 0.144232*cos(4*π*k/(N-1)) - -// 0.012604*cos(6*π*k/(N-1)), +// +// w[k] = 0.355768 - 0.487396*cos(2*π*k/(N-1)) + 0.144232*cos(4*π*k/(N-1)) - +// 0.012604*cos(6*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 8, ΔF_0.5 = 1.98, K = 4, ɣ_max = -93, β = -9. @@ -262,8 +282,10 @@ func NuttallComplex(seq []complex128) []complex128 { // The Blackman-Nuttall window is a low-resolution window. // // The sequence weights are -// w[k] = 0.3635819 - 0.4891775*cos(2*π*k/(N-1)) + 0.1365995*cos(4*π*k/(N-1)) - -// 0.0106411*cos(6*π*k/(N-1)), +// +// w[k] = 0.3635819 - 0.4891775*cos(2*π*k/(N-1)) + 0.1365995*cos(4*π*k/(N-1)) - +// 0.0106411*cos(6*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 8, ΔF_0.5 = 1.94, K = 4, ɣ_max = -98, β = -8.8. @@ -292,9 +314,11 @@ func BlackmanNuttallComplex(seq []complex128) []complex128 { // The Flat Top window is a low-resolution window. // // The sequence weights are -// w[k] = 0.21557895 - 0.41663158*cos(2*π*k/(N-1)) + -// 0.277263158*cos(4*π*k/(N-1)) - 0.083578947*cos(6*π*k/(N-1)) + -// 0.006947368*cos(4*π*k/(N-1)), +// +// w[k] = 0.21557895 - 0.41663158*cos(2*π*k/(N-1)) + +// 0.277263158*cos(4*π*k/(N-1)) - 0.083578947*cos(6*π*k/(N-1)) + +// 0.006947368*cos(4*π*k/(N-1)), +// // for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters: ΔF_0 = 10, ΔF_0.5 = 3.72, K = 5, ɣ_max = -93.0, β = -13.34. diff --git a/dsp/window/window_parametric.go b/dsp/window/window_parametric.go index 8e34435b..b54dc0c2 100644 --- a/dsp/window/window_parametric.go +++ b/dsp/window/window_parametric.go @@ -14,20 +14,23 @@ import "math" // The Gaussian window is an adjustable window. // // The sequence weights are -// w[k] = exp(-0.5 * ((k-M)/(σ*M))² ), M = (N-1)/2, +// +// w[k] = exp(-0.5 * ((k-M)/(σ*M))² ), M = (N-1)/2, +// // for k=0,1,...,N-1 where N is the length of the window. 
// // The properties of the window depend on the value of σ (sigma). // It can be used as high or low resolution window, depending of the σ value. // // Spectral leakage parameters are summarized in the table: -// | σ=0.3 | σ=0.5 | σ=1.2 | -// -------|---------------------------| -// ΔF_0 | 8 | 3.4 | 2.2 | -// ΔF_0.5 | 1.82 | 1.2 | 0.94 | -// K | 4 | 1.7 | 1.1 | -// ɣ_max | -65 | -31.5 | -15.5 | -// β | -8.52 | -4.48 | -0.96 | +// +// | σ=0.3 | σ=0.5 | σ=1.2 | +// -------|---------------------------| +// ΔF_0 | 8 | 3.4 | 2.2 | +// ΔF_0.5 | 1.82 | 1.2 | 0.94 | +// K | 4 | 1.7 | 1.1 | +// ɣ_max | -65 | -31.5 | -15.5 | +// β | -8.52 | -4.48 | -0.96 | type Gaussian struct { Sigma float64 } @@ -63,18 +66,21 @@ func (g Gaussian) TransformComplex(seq []complex128) []complex128 { // The Tukey window is an adjustable window. // // The sequence weights are -// w[k] = 0.5 * (1 + cos(π*(|k - M| - αM)/((1-α) * M))), |k - M| ≥ αM -// = 1, |k - M| < αM +// +// w[k] = 0.5 * (1 + cos(π*(|k - M| - αM)/((1-α) * M))), |k - M| ≥ αM +// = 1, |k - M| < αM +// // with M = (N - 1)/2 for k=0,1,...,N-1 where N is the length of the window. // // Spectral leakage parameters are summarized in the table: -// | α=0.3 | α=0.5 | α=0.7 | -// -------|--------------------------| -// ΔF_0 | 1.33 | 1.22 | 1.13 | -// ΔF_0.5 | 1.28 | 1.16 | 1.04 | -// K | 0.67 | 0.61 | 0.57 | -// ɣ_max | -18.2 | -15.1 | -13.8 | -// β | -1.41 | -2.50 | -3.74 | +// +// | α=0.3 | α=0.5 | α=0.7 | +// -------|--------------------------| +// ΔF_0 | 1.33 | 1.22 | 1.13 | +// ΔF_0.5 | 1.28 | 1.16 | 1.04 | +// K | 0.67 | 0.61 | 0.57 | +// ɣ_max | -18.2 | -15.1 | -13.8 | +// β | -1.41 | -2.50 | -3.74 | type Tukey struct { Alpha float64 } diff --git a/floats/floats.go b/floats/floats.go index 0b93d05d..5db73a05 100644 --- a/floats/floats.go +++ b/floats/floats.go @@ -361,7 +361,7 @@ func HasNaN(s []float64) bool { // will return all zeros if l or u is zero. // Also returns the mutated slice dst, so that it can be used in range, like: // -// for i, x := range LogSpan(dst, l, u) { ... } +// for i, x := range LogSpan(dst, l, u) { ... } func LogSpan(dst []float64, l, u float64) []float64 { Span(dst, math.Log(l), math.Log(u)) for i := range dst { @@ -681,7 +681,7 @@ func ScaleTo(dst []float64, c float64, s []float64) []float64 { // Span also returns the mutated slice dst, so that it can be used in range expressions, // like: // -// for i, x := range Span(dst, l, u) { ... } +// for i, x := range Span(dst, l, u) { ... } func Span(dst []float64, l, u float64) []float64 { n := len(dst) if n < 2 { @@ -765,8 +765,8 @@ func Sum(s []float64) float64 { } // Within returns the first index i where s[i] <= v < s[i+1]. Within panics if: -// - len(s) < 2 -// - s is not sorted +// - len(s) < 2 +// - s is not sorted func Within(s []float64, v float64) int { if len(s) < 2 { panic(shortSpan) diff --git a/floats/scalar/scalar.go b/floats/scalar/scalar.go index dab10fbc..46bf06b3 100644 --- a/floats/scalar/scalar.go +++ b/floats/scalar/scalar.go @@ -21,7 +21,8 @@ const minNormalFloat64 = 0x1p-1022 // EqualWithinRel returns true when the difference between a and b // is not greater than tol times the greater absolute value of a and b, -// abs(a-b) <= tol * max(abs(a), abs(b)). +// +// abs(a-b) <= tol * max(abs(a), abs(b)). func EqualWithinRel(a, b, tol float64) bool { if a == b { return true @@ -105,9 +106,10 @@ func ParseWithNA(s, missing string) (value, weight float64, err error) { // Round returns the half away from zero rounded value of x with prec precision. 
// // Special cases are: -// Round(±0) = +0 -// Round(±Inf) = ±Inf -// Round(NaN) = NaN +// +// Round(±0) = +0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN func Round(x float64, prec int) float64 { if x == 0 { // Make sure zero is returned @@ -135,9 +137,10 @@ func Round(x float64, prec int) float64 { // RoundEven returns the half even rounded value of x with prec precision. // // Special cases are: -// RoundEven(±0) = +0 -// RoundEven(±Inf) = ±Inf -// RoundEven(NaN) = NaN +// +// RoundEven(±0) = +0 +// RoundEven(±Inf) = ±Inf +// RoundEven(NaN) = NaN func RoundEven(x float64, prec int) float64 { if x == 0 { // Make sure zero is returned diff --git a/graph/community/louvain_common.go b/graph/community/louvain_common.go index 831dc4a2..c54d33bf 100644 --- a/graph/community/louvain_common.go +++ b/graph/community/louvain_common.go @@ -21,9 +21,12 @@ import ( // Q will panic if g has any edge with negative edge weight. // // If g is undirected, Q is calculated according to -// Q = 1/2m \sum_{ij} [ A_{ij} - (\gamma k_i k_j)/2m ] \delta(c_i,c_j), +// +// Q = 1/2m \sum_{ij} [ A_{ij} - (\gamma k_i k_j)/2m ] \delta(c_i,c_j), +// // If g is directed, it is calculated according to -// Q = 1/m \sum_{ij} [ A_{ij} - (\gamma k_i^in k_j^out)/m ] \delta(c_i,c_j). +// +// Q = 1/m \sum_{ij} [ A_{ij} - (\gamma k_i^in k_j^out)/m ] \delta(c_i,c_j). // // graph.Undirect may be used as a shim to allow calculation of Q for // directed graphs with the undirected modularity function. @@ -73,9 +76,12 @@ type ReducedGraph interface { // generator. Modularize will panic if g has any edge with negative edge weight. // // If g is undirected it is modularised to minimise -// Q = 1/2m \sum_{ij} [ A_{ij} - (\gamma k_i k_j)/2m ] \delta(c_i,c_j), +// +// Q = 1/2m \sum_{ij} [ A_{ij} - (\gamma k_i k_j)/2m ] \delta(c_i,c_j), +// // If g is directed it is modularised to minimise -// Q = 1/m \sum_{ij} [ A_{ij} - (\gamma k_i^in k_j^out)/m ] \delta(c_i,c_j). +// +// Q = 1/m \sum_{ij} [ A_{ij} - (\gamma k_i^in k_j^out)/m ] \delta(c_i,c_j). // // The concrete type of the ReducedGraph will be a pointer to either a // ReducedUndirected or a ReducedDirected depending on the type of g. @@ -120,9 +126,12 @@ type Multiplex interface { // negative edge weight. // // If g is undirected, Q is calculated according to -// Q_{layer} = w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i k_j)/2m_{layer} ] \delta(c_i,c_j), +// +// Q_{layer} = w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i k_j)/2m_{layer} ] \delta(c_i,c_j), +// // If g is directed, it is calculated according to -// Q_{layer} = w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i^in k_j^out)/m_{layer} ] \delta(c_i,c_j). +// +// Q_{layer} = w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i^in k_j^out)/m_{layer} ] \delta(c_i,c_j). // // Note that Q values for multiplex graphs are not scaled by the total layer edge weight. // @@ -183,9 +192,12 @@ type ReducedMultiplex interface { // edge weight that does not sign-match the layer weight. // // If g is undirected it is modularised to minimise -// Q = \sum w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i k_j)/2m ] \delta(c_i,c_j). +// +// Q = \sum w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i k_j)/2m ] \delta(c_i,c_j). +// // If g is directed it is modularised to minimise -// Q = \sum w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i^in k_j^out)/m_{layer} ] \delta(c_i,c_j). 
+// +// Q = \sum w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i^in k_j^out)/m_{layer} ] \delta(c_i,c_j). // // The concrete type of the ReducedMultiplex will be a pointer to a // ReducedUndirectedMultiplex. diff --git a/graph/community/louvain_directed.go b/graph/community/louvain_directed.go index 06a4a9d6..ac070f35 100644 --- a/graph/community/louvain_directed.go +++ b/graph/community/louvain_directed.go @@ -22,8 +22,7 @@ import ( // is γ as defined in Reichardt and Bornholdt doi:10.1103/PhysRevE.74.016110. // qDirected will panic if g has any edge with negative edge weight. // -// Q = 1/m \sum_{ij} [ A_{ij} - (\gamma k_i^in k_j^out)/m ] \delta(c_i,c_j) -// +// Q = 1/m \sum_{ij} [ A_{ij} - (\gamma k_i^in k_j^out)/m ] \delta(c_i,c_j) func qDirected(g graph.Directed, communities [][]graph.Node, resolution float64) float64 { nodes := graph.NodesOf(g.Nodes()) weight := positiveWeightFuncFor(g) diff --git a/graph/community/louvain_directed_multiplex.go b/graph/community/louvain_directed_multiplex.go index 6befe0b4..6bd988ca 100644 --- a/graph/community/louvain_directed_multiplex.go +++ b/graph/community/louvain_directed_multiplex.go @@ -38,7 +38,7 @@ type DirectedMultiplex interface { // qUndirectedMultiplex will panic if the graph has any layer weight-scaled edge with // negative edge weight. // -// Q_{layer} = w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i k_j)/2m ] \delta(c_i,c_j) +// Q_{layer} = w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i k_j)/2m ] \delta(c_i,c_j) // // Note that Q values for multiplex graphs are not scaled by the total layer edge weight. func qDirectedMultiplex(g DirectedMultiplex, communities [][]graph.Node, weights, resolutions []float64) []float64 { diff --git a/graph/community/louvain_undirected.go b/graph/community/louvain_undirected.go index a0632135..60f6a092 100644 --- a/graph/community/louvain_undirected.go +++ b/graph/community/louvain_undirected.go @@ -22,7 +22,7 @@ import ( // is γ as defined in Reichardt and Bornholdt doi:10.1103/PhysRevE.74.016110. // qUndirected will panic if g has any edge with negative edge weight. // -// Q = 1/2m \sum_{ij} [ A_{ij} - (\gamma k_i k_j)/2m ] \delta(c_i,c_j) +// Q = 1/2m \sum_{ij} [ A_{ij} - (\gamma k_i k_j)/2m ] \delta(c_i,c_j) // // graph.Undirect may be used as a shim to allow calculation of Q for // directed graphs. diff --git a/graph/community/louvain_undirected_multiplex.go b/graph/community/louvain_undirected_multiplex.go index bfa5943f..0a8af3ca 100644 --- a/graph/community/louvain_undirected_multiplex.go +++ b/graph/community/louvain_undirected_multiplex.go @@ -38,7 +38,7 @@ type UndirectedMultiplex interface { // qUndirectedMultiplex will panic if the graph has any layer weight-scaled edge with // negative edge weight. // -// Q_{layer} = w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i k_j)/2m ] \delta(c_i,c_j) +// Q_{layer} = w_{layer} \sum_{ij} [ A_{layer}*_{ij} - (\gamma_{layer} k_i k_j)/2m ] \delta(c_i,c_j) // // Note that Q values for multiplex graphs are not scaled by the total layer edge weight. // diff --git a/graph/encoding/dot/doc.go b/graph/encoding/dot/doc.go index 235ac0f0..99f06dcd 100644 --- a/graph/encoding/dot/doc.go +++ b/graph/encoding/dot/doc.go @@ -11,7 +11,7 @@ // // DOT grammar: http://www.graphviz.org/doc/info/lang.html // -// Attribute quoting +// # Attribute quoting // // Attributes and IDs are quoted if needed during marshalling, to conform with // valid DOT syntax. 
Quoted IDs and attributes are unquoted during unmarshaling, diff --git a/graph/encoding/dot/encode.go b/graph/encoding/dot/encode.go index 9ece0e87..3469a6f3 100644 --- a/graph/encoding/dot/encode.go +++ b/graph/encoding/dot/encode.go @@ -636,11 +636,11 @@ var ( // // An ID is one of the following: // -// 1. Any string of alphabetic ([a-zA-Z\200-\377]) characters, underscores ('_') -// or digits ([0-9]), not beginning with a digit; -// 2. a numeral [-]?(.[0-9]+ | [0-9]+(.[0-9]*)? ); -// 3. any double-quoted string ("...") possibly containing escaped quotes (\"); -// 4. an HTML string (<...>). +// 1. Any string of alphabetic ([a-zA-Z\200-\377]) characters, underscores ('_') +// or digits ([0-9]), not beginning with a digit; +// 2. a numeral [-]?(.[0-9]+ | [0-9]+(.[0-9]*)? ); +// 3. any double-quoted string ("...") possibly containing escaped quotes (\"); +// 4. an HTML string (<...>). func isID(s string) bool { // 1. an identifier. if reIdent.MatchString(s) { diff --git a/graph/formats/dot/ast/ast.go b/graph/formats/dot/ast/ast.go index 63c7b0e7..7cf52f1d 100644 --- a/graph/formats/dot/ast/ast.go +++ b/graph/formats/dot/ast/ast.go @@ -21,12 +21,12 @@ import ( // // Examples. // -// digraph G { -// A -> B -// } -// graph H { -// C - D -// } +// digraph G { +// A -> B +// } +// graph H { +// C - D +// } type File struct { // Graphs. Graphs []*Graph @@ -50,10 +50,10 @@ func (f *File) String() string { // // Examples. // -// digraph G { -// A -> {B C} -// B -> C -// } +// digraph G { +// A -> {B C} +// B -> C +// } type Graph struct { // Strict graph; multi-edges forbidden. Strict bool @@ -91,11 +91,11 @@ func (g *Graph) String() string { // A Stmt represents a statement, and has one of the following underlying types. // -// *NodeStmt -// *EdgeStmt -// *AttrStmt -// *Attr -// *Subgraph +// *NodeStmt +// *EdgeStmt +// *AttrStmt +// *Attr +// *Subgraph type Stmt interface { fmt.Stringer // isStmt ensures that only statements can be assigned to the Stmt interface. @@ -108,7 +108,7 @@ type Stmt interface { // // Examples. // -// A [color=blue] +// A [color=blue] type NodeStmt struct { // Node. Node *Node @@ -139,9 +139,9 @@ func (e *NodeStmt) String() string { // // Examples. // -// A -> B -// A -> {B C} -// A -> B -> C +// A -> B +// A -> {B C} +// A -> B -> C type EdgeStmt struct { // Source vertex. From Vertex @@ -196,9 +196,9 @@ func (e *Edge) String() string { // // Examples. // -// graph [rankdir=LR] -// node [color=blue fillcolor=red] -// edge [minlen=1] +// graph [rankdir=LR] +// node [color=blue fillcolor=red] +// edge [minlen=1] type AttrStmt struct { // Graph component kind to which the attributes are assigned. Kind Kind @@ -250,7 +250,7 @@ func (k Kind) String() string { // // Examples. // -// rank=same +// rank=same type Attr struct { // Attribute key. Key string @@ -269,7 +269,7 @@ func (a *Attr) String() string { // // Examples. // -// subgraph S {A B C} +// subgraph S {A B C} type Subgraph struct { // Subgraph ID; or empty if none. ID string @@ -305,8 +305,8 @@ func (*Subgraph) isStmt() {} // A Vertex represents a vertex, and has one of the following underlying types. // -// *Node -// *Subgraph +// *Node +// *Subgraph type Vertex interface { fmt.Stringer // isVertex ensures that only vertices can be assigned to the Vertex @@ -320,8 +320,8 @@ type Vertex interface { // // Examples. // -// A -// A:nw +// A +// A:nw type Node struct { // Node ID. 
ID string diff --git a/graph/formats/rdf/debug.go b/graph/formats/rdf/debug.go index 24dcb534..da629cd0 100644 --- a/graph/formats/rdf/debug.go +++ b/graph/formats/rdf/debug.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build debug // +build debug package rdf diff --git a/graph/formats/rdf/iso_canonical.go b/graph/formats/rdf/iso_canonical.go index 7a133849..1114f183 100644 --- a/graph/formats/rdf/iso_canonical.go +++ b/graph/formats/rdf/iso_canonical.go @@ -692,11 +692,12 @@ func split(statements []*Statement) [][]*Statement { // // The correspondence between the parameters for the function in the paper // with the implementation here is as follows: -// - G = statements -// - hash = hash -// - P = parts (already sorted by hashBNodesPerSplit) -// - G⊥ = lowest -// - B = hash.blanks +// - G = statements +// - hash = hash +// - P = parts (already sorted by hashBNodesPerSplit) +// - G⊥ = lowest +// - B = hash.blanks +// // The additional parameter dist specifies that distinguish should treat // coequal trivial parts as a coarse of intermediate part and distinguish // the nodes in that merged part. diff --git a/graph/formats/rdf/nodebug.go b/graph/formats/rdf/nodebug.go index 9c3f2ddb..d8da1db8 100644 --- a/graph/formats/rdf/nodebug.go +++ b/graph/formats/rdf/nodebug.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !debug // +build !debug package rdf diff --git a/graph/formats/rdf/query.go b/graph/formats/rdf/query.go index 8dd548e0..5a5101ae 100644 --- a/graph/formats/rdf/query.go +++ b/graph/formats/rdf/query.go @@ -237,14 +237,13 @@ func (q Query) Not(p Query) Query { // is wanted, fn should return its input and false when the partial // traversal returns an empty result. 
// -// result := start.Repeat(func(q rdf.Query) (rdf.Query, bool) { -// r := q.Out(condition) -// if r.Len() == 0 { -// return q, false -// } -// return r, true -// }).Result() -// +// result := start.Repeat(func(q rdf.Query) (rdf.Query, bool) { +// r := q.Out(condition) +// if r.Len() == 0 { +// return q, false +// } +// return r, true +// }).Result() func (q Query) Repeat(fn func(Query) (q Query, ok bool)) Query { for { var ok bool diff --git a/graph/internal/set/set.go b/graph/internal/set/set.go index 0506b8e9..c172154b 100644 --- a/graph/internal/set/set.go +++ b/graph/internal/set/set.go @@ -171,13 +171,12 @@ func Equal(a, b Nodes) bool { // The union of two sets, a and b, is the set containing all the // elements of each, for instance: // -// {a,b,c} UNION {d,e,f} = {a,b,c,d,e,f} +// {a,b,c} UNION {d,e,f} = {a,b,c,d,e,f} // // Since sets may not have repetition, unions of two sets that overlap // do not contain repeat elements, that is: // -// {a,b,c} UNION {b,c,d} = {a,b,c,d} -// +// {a,b,c} UNION {b,c,d} = {a,b,c,d} func UnionOfNodes(a, b Nodes) Nodes { if same(a, b) { return CloneNodes(a) @@ -199,18 +198,17 @@ func UnionOfNodes(a, b Nodes) Nodes { // The intersection of two sets, a and b, is the set containing all // the elements shared between the two sets, for instance: // -// {a,b,c} INTERSECT {b,c,d} = {b,c} +// {a,b,c} INTERSECT {b,c,d} = {b,c} // // The intersection between a set and itself is itself, and thus // effectively a copy operation: // -// {a,b,c} INTERSECT {a,b,c} = {a,b,c} +// {a,b,c} INTERSECT {a,b,c} = {a,b,c} // // The intersection between two sets that share no elements is the empty // set: // -// {a,b,c} INTERSECT {d,e,f} = {} -// +// {a,b,c} INTERSECT {d,e,f} = {} func IntersectionOfNodes(a, b Nodes) Nodes { if same(a, b) { return CloneNodes(a) diff --git a/graph/iterator/map.go b/graph/iterator/map.go index c2d92297..2bfa2764 100644 --- a/graph/iterator/map.go +++ b/graph/iterator/map.go @@ -31,6 +31,7 @@ type emptyInterface struct { // Having a clone here allows us to embed a map iterator // inside type mapIter so that mapIters can be re-used // without doing any allocations. +// //lint:ignore U1000 This is a verbatim copy of the runtime type. type hiter struct { key unsafe.Pointer diff --git a/graph/network/betweenness.go b/graph/network/betweenness.go index bb1201f2..9d418dbb 100644 --- a/graph/network/betweenness.go +++ b/graph/network/betweenness.go @@ -14,7 +14,7 @@ import ( // Betweenness returns the non-zero betweenness centrality for nodes in the unweighted graph g. // -// C_B(v) = \sum_{s ≠ v ≠ t ∈ V} (\sigma_{st}(v) / \sigma_{st}) +// C_B(v) = \sum_{s ≠ v ≠ t ∈ V} (\sigma_{st}(v) / \sigma_{st}) // // where \sigma_{st} and \sigma_{st}(v) are the number of shortest paths from s to t, // and the subset of those paths containing v respectively. @@ -52,7 +52,7 @@ func Betweenness(g graph.Graph) map[int64]float64 { // EdgeBetweenness returns the non-zero betweenness centrality for edges in the // unweighted graph g. For an edge e the centrality C_B is computed as // -// C_B(e) = \sum_{s ≠ t ∈ V} (\sigma_{st}(e) / \sigma_{st}), +// C_B(e) = \sum_{s ≠ t ∈ V} (\sigma_{st}(e) / \sigma_{st}), // // where \sigma_{st} and \sigma_{st}(e) are the number of shortest paths from s // to t, and the subset of those paths containing e, respectively. 
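Editorial note: the betweenness definition above, C_B(v) = Σ_{s ≠ v ≠ t} σ_st(v)/σ_st, is easiest to see on a three-node path, where only the interior node can lie on a shortest path between two other nodes. A sketch using network.Betweenness with an illustrative graph built from graph/simple (not part of this patch):

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/graph/network"
		"gonum.org/v1/gonum/graph/simple"
	)

	func main() {
		// Path graph 0—1—2: nodes 0 and 2 are endpoints, so only node 1
		// receives a non-zero betweenness score and appears in the map.
		g := simple.NewUndirectedGraph()
		g.SetEdge(g.NewEdge(simple.Node(0), simple.Node(1)))
		g.SetEdge(g.NewEdge(simple.Node(1), simple.Node(2)))

		fmt.Println(network.Betweenness(g))
	}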
@@ -146,7 +146,7 @@ func brandes(g graph.Graph, accumulate func(s graph.Node, stack linear.NodeStack // BetweennessWeighted returns the non-zero betweenness centrality for nodes in the weighted // graph g used to construct the given shortest paths. // -// C_B(v) = \sum_{s ≠ v ≠ t ∈ V} (\sigma_{st}(v) / \sigma_{st}) +// C_B(v) = \sum_{s ≠ v ≠ t ∈ V} (\sigma_{st}(v) / \sigma_{st}) // // where \sigma_{st} and \sigma_{st}(v) are the number of shortest paths from s to t, // and the subset of those paths containing v respectively. @@ -196,7 +196,7 @@ func BetweennessWeighted(g graph.Weighted, p path.AllShortest) map[int64]float64 // EdgeBetweennessWeighted returns the non-zero betweenness centrality for edges in // the weighted graph g. For an edge e the centrality C_B is computed as // -// C_B(e) = \sum_{s ≠ t ∈ V} (\sigma_{st}(e) / \sigma_{st}), +// C_B(e) = \sum_{s ≠ t ∈ V} (\sigma_{st}(e) / \sigma_{st}), // // where \sigma_{st} and \sigma_{st}(e) are the number of shortest paths from s // to t, and the subset of those paths containing e, respectively. diff --git a/graph/network/diffusion.go b/graph/network/diffusion.go index b9a99de7..e06ec7a2 100644 --- a/graph/network/diffusion.go +++ b/graph/network/diffusion.go @@ -14,7 +14,9 @@ import ( // h, according to the Laplacian with a diffusion time of t. // The resulting heat distribution is returned, written into the map dst and // returned, -// d = exp(-Lt)×h +// +// d = exp(-Lt)×h +// // where L is the graph Laplacian. Indexing into h and dst is defined by the // Laplacian Index field. If dst is nil, a new map is created. // @@ -45,7 +47,9 @@ func Diffuse(dst, h map[int64]float64, by spectral.Laplacian, t float64) map[int // DiffuseToEquilibrium performs a heat diffusion across nodes of the // graph described by the given Laplacian using the initial heat // distribution, h, according to the Laplacian until the update function -// h_{n+1} = h_n - L×h_n +// +// h_{n+1} = h_n - L×h_n +// // results in a 2-norm update difference within tol, or iters updates have // been made. // The resulting heat distribution is returned as eq, written into the map dst, diff --git a/graph/network/distance.go b/graph/network/distance.go index 3092249d..25a0c0c2 100644 --- a/graph/network/distance.go +++ b/graph/network/distance.go @@ -14,7 +14,7 @@ import ( // Closeness returns the closeness centrality for nodes in the graph g used to // construct the given shortest paths. // -// C(v) = 1 / \sum_u d(u,v) +// C(v) = 1 / \sum_u d(u,v) // // For directed graphs the incoming paths are used. Infinite distances are // not considered. @@ -43,7 +43,7 @@ func Closeness(g graph.Graph, p path.AllShortest) map[int64]float64 { // Farness returns the farness for nodes in the graph g used to construct // the given shortest paths. // -// F(v) = \sum_u d(u,v) +// F(v) = \sum_u d(u,v) // // For directed graphs the incoming paths are used. Infinite distances are // not considered. @@ -72,7 +72,7 @@ func Farness(g graph.Graph, p path.AllShortest) map[int64]float64 { // Harmonic returns the harmonic centrality for nodes in the graph g used to // construct the given shortest paths. // -// H(v)= \sum_{u ≠ v} 1 / d(u,v) +// H(v)= \sum_{u ≠ v} 1 / d(u,v) // // For directed graphs the incoming paths are used. Infinite distances are // not considered. @@ -103,7 +103,7 @@ func Harmonic(g graph.Graph, p path.AllShortest) map[int64]float64 { // Residual returns the Dangalchev's residual closeness for nodes in the graph // g used to construct the given shortest paths. 
// -// C(v)= \sum_{u ≠ v} 1 / 2^d(u,v) +// C(v)= \sum_{u ≠ v} 1 / 2^d(u,v) // // For directed graphs the incoming paths are used. Infinite distances are // not considered. diff --git a/graph/path/dynamic/dstarlite.go b/graph/path/dynamic/dstarlite.go index e1d9a1e4..d7730938 100644 --- a/graph/path/dynamic/dstarlite.go +++ b/graph/path/dynamic/dstarlite.go @@ -16,8 +16,7 @@ import ( // DStarLite implements the D* Lite dynamic re-planning path search algorithm. // -// doi:10.1109/tro.2004.838026 and ISBN:0-262-51129-0 pp476-483 -// +// doi:10.1109/tro.2004.838026 and ISBN:0-262-51129-0 pp476-483 type DStarLite struct { s, t *dStarLiteNode last *dStarLiteNode @@ -426,8 +425,7 @@ func (k key) isBadKey() bool { return k != k } // less returns whether k is less than other. From ISBN:0-262-51129-0 pp476-483: // -// k ≤ k' iff k₁ < k'₁ OR (k₁ == k'₁ AND k₂ ≤ k'₂) -// +// k ≤ k' iff k₁ < k'₁ OR (k₁ == k'₁ AND k₂ ≤ k'₂) func (k key) less(other key) bool { if k.isBadKey() || other.isBadKey() { panic("D* Lite: poisoned key") diff --git a/graph/topo/tarjan.go b/graph/topo/tarjan.go index 17bda239..ab479a92 100644 --- a/graph/topo/tarjan.go +++ b/graph/topo/tarjan.go @@ -87,7 +87,6 @@ func sortedFrom(sccs [][]graph.Node, order func([]graph.Node)) ([]graph.Node, er // Generally speaking, a directed graph where the number of strongly connected components is equal // to the number of nodes is acyclic, unless you count reflexive edges as a cycle (which requires // only a little extra testing.) -// func TarjanSCC(g graph.Directed) [][]graph.Node { return tarjanSCCstabilized(g, nil) } @@ -130,7 +129,6 @@ func tarjanSCCstabilized(g graph.Directed, order func([]graph.Node)) [][]graph.N // algorithm. The implementation is from the pseudocode at // // http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm?oldid=642744644 -// type tarjan struct { succ func(id int64) []graph.Node diff --git a/integrate/quad/doc.go b/integrate/quad/doc.go index 91757067..46d2e84a 100644 --- a/integrate/quad/doc.go +++ b/integrate/quad/doc.go @@ -3,5 +3,4 @@ // license that can be found in the LICENSE file. // Package quad provides numerical evaluation of definite integrals of single-variable functions. -// package quad // import "gonum.org/v1/gonum/integrate/quad" diff --git a/integrate/quad/hermite.go b/integrate/quad/hermite.go index 88ff4697..c21f502a 100644 --- a/integrate/quad/hermite.go +++ b/integrate/quad/hermite.go @@ -13,7 +13,8 @@ import ( // Hermite generates sample locations and weights for performing quadrature with // a squared-exponential weight -// int_-inf^inf e^(-x^2) f(x) dx . +// +// int_-inf^inf e^(-x^2) f(x) dx . type Hermite struct{} func (h Hermite) FixedLocations(x, weight []float64, min, max float64) { diff --git a/integrate/quad/legendre.go b/integrate/quad/legendre.go index d9aa1a6d..3f8ab389 100644 --- a/integrate/quad/legendre.go +++ b/integrate/quad/legendre.go @@ -7,7 +7,8 @@ package quad import "math" // Legendre integrates an unweighted function over finite bounds -// int_min^max f(x) dx +// +// int_min^max f(x) dx type Legendre struct{} func (l Legendre) FixedLocations(x, weight []float64, min, max float64) { diff --git a/integrate/quad/quad.go b/integrate/quad/quad.go index 21048350..121dd948 100644 --- a/integrate/quad/quad.go +++ b/integrate/quad/quad.go @@ -12,7 +12,8 @@ import ( // FixedLocationer computes a set of quadrature locations and weights and stores // them in-place into x and weight respectively. The number of points generated is equal to // the len(x). 
The weights and locations should be chosen such that -// int_min^max f(x) dx ≈ \sum_i w_i f(x_i) +// +// int_min^max f(x) dx ≈ \sum_i w_i f(x_i) type FixedLocationer interface { FixedLocations(x, weight []float64, min, max float64) } @@ -26,7 +27,9 @@ type FixedLocationSingler interface { // Fixed approximates the integral of the function f from min to max using a fixed // n-point quadrature rule. During evaluation, f will be evaluated n times using // the weights and locations specified by rule. That is, Fixed estimates -// int_min^max f(x) dx ≈ \sum_i w_i f(x_i) +// +// int_min^max f(x) dx ≈ \sum_i w_i f(x_i) +// // If rule is nil, an acceptable default is chosen, otherwise it is // assumed that the properties of the integral match the assumptions of rule. // For example, Legendre assumes that the integration bounds are finite. If diff --git a/integrate/romberg.go b/integrate/romberg.go index 25e45d0a..8ea87306 100644 --- a/integrate/romberg.go +++ b/integrate/romberg.go @@ -10,10 +10,14 @@ import ( ) // Romberg returns an approximate value of the integral -// \int_a^b f(x)dx +// +// \int_a^b f(x)dx +// // computed using the Romberg's method. The function f is given // as a slice of equally-spaced samples, that is, -// f[i] = f(a + i*dx) +// +// f[i] = f(a + i*dx) +// // and dx is the spacing between the samples. // // The length of f must be 2^k + 1, where k is a positive integer, diff --git a/integrate/simpsons.go b/integrate/simpsons.go index ab221c7e..b126e9b0 100644 --- a/integrate/simpsons.go +++ b/integrate/simpsons.go @@ -7,10 +7,14 @@ package integrate import "sort" // Simpsons returns an approximate value of the integral -// \int_a^b f(x)dx +// +// \int_a^b f(x)dx +// // computed using the Simpsons's method. The function f is given as a slice of // samples evaluated at locations in x, that is, -// f[i] = f(x[i]), x[0] = a, x[len(x)-1] = b +// +// f[i] = f(x[i]), x[0] = a, x[len(x)-1] = b +// // The slice x must be sorted in strictly increasing order. x and f must be of // equal length and the length must be at least 3. // diff --git a/integrate/testquad/testquad.go b/integrate/testquad/testquad.go index f6b6a8f6..fb879d1f 100644 --- a/integrate/testquad/testquad.go +++ b/integrate/testquad/testquad.go @@ -11,7 +11,9 @@ import ( ) // Integral is a definite integral -// ∫_a^b f(x)dx +// +// ∫_a^b f(x)dx +// // with a known value. 
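The FixedLocationer/Fixed contract above boils down to evaluating f at the returned locations and forming the weighted sum. A short sketch of that final step follows; fixedEstimate, xs and ws are illustrative names, and the two-point Gauss–Legendre rule is supplied by hand rather than by a FixedLocationer.

package main

import (
	"fmt"
	"math"
)

// fixedEstimate forms \sum_i w_i f(x_i), the fixed-rule approximation of
// int_min^max f(x) dx described above, from precomputed locations xs and
// weights ws.
func fixedEstimate(f func(float64) float64, xs, ws []float64) float64 {
	var sum float64
	for i, x := range xs {
		sum += ws[i] * f(x)
	}
	return sum
}

func main() {
	// Two-point Gauss–Legendre rule on [-1, 1]: locations ±1/sqrt(3), weights 1.
	xs := []float64{-1 / math.Sqrt(3), 1 / math.Sqrt(3)}
	ws := []float64{1, 1}
	// The rule integrates cubics exactly: ∫_{-1}^{1} (x^3 + 1) dx = 2.
	fmt.Println(fixedEstimate(func(x float64) float64 { return x*x*x + 1 }, xs, ws))
}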
type Integral struct { Name string @@ -21,7 +23,8 @@ type Integral struct { } // Constant returns the integral of a constant function -// ∫_{-1}^2 alpha dx +// +// ∫_{-1}^2 alpha dx func Constant(alpha float64) Integral { return Integral{ Name: fmt.Sprintf("∫_{-1}^{2} %vdx", alpha), @@ -35,7 +38,8 @@ func Constant(alpha float64) Integral { } // Poly returns the integral of a polynomial -// ∫_{-1}^2 x^degree dx +// +// ∫_{-1}^2 x^degree dx func Poly(degree int) Integral { d := float64(degree) return Integral{ @@ -50,7 +54,8 @@ func Poly(degree int) Integral { } // Sin returns the integral -// ∫_0^1 sin(x)dx +// +// ∫_0^1 sin(x)dx func Sin() Integral { return Integral{ Name: "∫_0^1 sin(x)dx", @@ -64,7 +69,8 @@ func Sin() Integral { } // XExpMinusX returns the integral -// ∫_0^1 x*exp(-x)dx +// +// ∫_0^1 x*exp(-x)dx func XExpMinusX() Integral { return Integral{ Name: "∫_0^1 x*exp(-x)dx", @@ -78,7 +84,8 @@ func XExpMinusX() Integral { } // Sqrt returns the integral -// ∫_0^1 sqrt(x)dx +// +// ∫_0^1 sqrt(x)dx func Sqrt() Integral { return Integral{ Name: "∫_0^1 sqrt(x)dx", @@ -92,7 +99,8 @@ func Sqrt() Integral { } // ExpOverX2Plus1 returns the integral -// ∫_0^1 exp(x)/(x*x+1)dx +// +// ∫_0^1 exp(x)/(x*x+1)dx func ExpOverX2Plus1() Integral { return Integral{ Name: "∫_0^1 exp(x)/(x*x+1)dx", diff --git a/integrate/trapezoidal.go b/integrate/trapezoidal.go index 1b8e4a13..1e31070d 100644 --- a/integrate/trapezoidal.go +++ b/integrate/trapezoidal.go @@ -7,18 +7,26 @@ package integrate import "sort" // Trapezoidal returns an approximate value of the integral -// \int_a^b f(x) dx +// +// \int_a^b f(x) dx +// // computed using the trapezoidal rule. The function f is given as a slice of // samples evaluated at locations in x, that is, -// f[i] = f(x[i]), x[0] = a, x[len(x)-1] = b +// +// f[i] = f(x[i]), x[0] = a, x[len(x)-1] = b +// // The slice x must be sorted in strictly increasing order. x and f must be of // equal length and the length must be at least 2. 
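Because the samples follow the convention f[i] = f(x[i]) with x[0] = a and x[len(x)-1] = b, calling Trapezoidal (whose signature appears just below) only requires building the two slices. A small usage sketch; the integrand and sampling density are arbitrary choices for illustration.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/integrate"
)

func main() {
	// Sample f(x) = x*x on [0, 2] at 101 evenly spaced points, following
	// the convention f[i] = f(x[i]), x[0] = a, x[len(x)-1] = b.
	const n = 101
	x := make([]float64, n)
	f := make([]float64, n)
	for i := range x {
		x[i] = 2 * float64(i) / float64(n-1)
		f[i] = x[i] * x[i]
	}
	// ∫_0^2 x^2 dx = 8/3 ≈ 2.6667; the trapezoidal estimate is close.
	fmt.Println(integrate.Trapezoidal(x, f))
}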
// // The trapezoidal rule approximates f by a piecewise linear function and // estimates -// \int_x[i]^x[i+1] f(x) dx +// +// \int_x[i]^x[i+1] f(x) dx +// // as -// (x[i+1] - x[i]) * (f[i] + f[i+1])/2 +// +// (x[i+1] - x[i]) * (f[i] + f[i+1])/2 +// // More details on the trapezoidal rule can be found at: // https://en.wikipedia.org/wiki/Trapezoidal_rule func Trapezoidal(x, f []float64) float64 { diff --git a/internal/asm/c128/scal.go b/internal/asm/c128/scal.go index 47a80e50..27c35817 100644 --- a/internal/asm/c128/scal.go +++ b/internal/asm/c128/scal.go @@ -5,9 +5,10 @@ package c128 // ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } +// +// for i, v := range x { +// dst[i] = alpha * v +// } func ScalUnitaryTo(dst []complex128, alpha complex128, x []complex128) { for i, v := range x { dst[i] = alpha * v @@ -15,12 +16,13 @@ func ScalUnitaryTo(dst []complex128, alpha complex128, x []complex128) { } // ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } +// +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } func ScalIncTo(dst []complex128, incDst uintptr, alpha complex128, x []complex128, n, incX uintptr) { var idst, ix uintptr for i := 0; i < int(n); i++ { diff --git a/internal/asm/c128/stubs.go b/internal/asm/c128/stubs.go index 964e5c49..9c3a8fb8 100644 --- a/internal/asm/c128/stubs.go +++ b/internal/asm/c128/stubs.go @@ -10,9 +10,10 @@ import ( ) // Add is -// for i, v := range s { -// dst[i] += v -// } +// +// for i, v := range s { +// dst[i] += v +// } func Add(dst, s []complex128) { for i, v := range s { dst[i] += v @@ -20,9 +21,10 @@ func Add(dst, s []complex128) { } // AddConst is -// for i := range x { -// x[i] += alpha -// } +// +// for i := range x { +// x[i] += alpha +// } func AddConst(alpha complex128, x []complex128) { for i := range x { x[i] += alpha @@ -30,14 +32,15 @@ func AddConst(alpha complex128, x []complex128) { } // CumSum is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] + v -// } -// return dst +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst func CumSum(dst, s []complex128) []complex128 { if len(s) == 0 { return dst @@ -50,14 +53,15 @@ func CumSum(dst, s []complex128) []complex128 { } // CumProd is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] * v -// } -// return dst +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst func CumProd(dst, s []complex128) []complex128 { if len(s) == 0 { return dst @@ -70,9 +74,10 @@ func CumProd(dst, s []complex128) []complex128 { } // Div is -// for i, v := range s { -// dst[i] /= v -// } +// +// for i, v := range s { +// dst[i] /= v +// } func Div(dst, s []complex128) { for i, v := range s { dst[i] /= v @@ -80,10 +85,11 @@ func Div(dst, s []complex128) { } // DivTo is -// for i, v := range s { -// dst[i] = v / t[i] -// } -// return dst +// +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst func DivTo(dst, s, t []complex128) []complex128 { for i, v := range s { dst[i] = v / t[i] @@ -92,10 +98,11 @@ func DivTo(dst, s, t []complex128) []complex128 { } // DotUnitary is -// for i, v := range x { -// sum += cmplx.Conj(v) * 
y[i] -// } -// return sum +// +// for i, v := range x { +// sum += cmplx.Conj(v) * y[i] +// } +// return sum func DotUnitary(x, y []complex128) (sum complex128) { for i, v := range x { sum += cmplx.Conj(v) * y[i] @@ -159,10 +166,11 @@ func L2NormUnitary(x []complex128) (norm float64) { } // Sum is -// var sum complex128 -// for i := range x { -// sum += x[i] -// } +// +// var sum complex128 +// for i := range x { +// sum += x[i] +// } func Sum(x []complex128) complex128 { var sum complex128 for _, v := range x { diff --git a/internal/asm/c128/stubs_amd64.go b/internal/asm/c128/stubs_amd64.go index dbf3ff46..c0e26a2f 100644 --- a/internal/asm/c128/stubs_amd64.go +++ b/internal/asm/c128/stubs_amd64.go @@ -8,90 +8,102 @@ package c128 // AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } +// +// for i, v := range x { +// y[i] += alpha * v +// } func AxpyUnitary(alpha complex128, x, y []complex128) // AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) // AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) // AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } +// +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) // DscalUnitary is -// for i, v := range x { -// x[i] = complex(real(v)*alpha, imag(v)*alpha) -// } +// +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } func DscalUnitary(alpha float64, x []complex128) // DscalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) -// ix += inc -// } +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } func DscalInc(alpha float64, x []complex128, n, inc uintptr) // ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } func ScalInc(alpha complex128, x []complex128, n, inc uintptr) // ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } +// +// for i := range x { +// x[i] *= alpha +// } func ScalUnitary(alpha complex128, x []complex128) // DotcUnitary is -// for i, v := range x { -// sum += y[i] * cmplx.Conj(v) -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * cmplx.Conj(v) +// } +// return sum func DotcUnitary(x, y []complex128) (sum complex128) // DotcInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * cmplx.Conj(x[ix]) -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * cmplx.Conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) // DotuUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * v +// } 
+// return sum func DotuUnitary(x, y []complex128) (sum complex128) // DotuInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) diff --git a/internal/asm/c128/stubs_noasm.go b/internal/asm/c128/stubs_noasm.go index 42b3e9c6..21dfc4a8 100644 --- a/internal/asm/c128/stubs_noasm.go +++ b/internal/asm/c128/stubs_noasm.go @@ -10,9 +10,10 @@ package c128 import "math/cmplx" // AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } +// +// for i, v := range x { +// y[i] += alpha * v +// } func AxpyUnitary(alpha complex128, x, y []complex128) { for i, v := range x { y[i] += alpha * v @@ -20,9 +21,10 @@ func AxpyUnitary(alpha complex128, x, y []complex128) { } // AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) { for i, v := range x { dst[i] = alpha*v + y[i] @@ -30,11 +32,12 @@ func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) { } // AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { for i := 0; i < int(n); i++ { y[iy] += alpha * x[ix] @@ -44,12 +47,13 @@ func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) } // AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } +// +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { for i := 0; i < int(n); i++ { dst[idst] = alpha*x[ix] + y[iy] @@ -60,9 +64,10 @@ func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y [] } // DscalUnitary is -// for i, v := range x { -// x[i] = complex(real(v)*alpha, imag(v)*alpha) -// } +// +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } func DscalUnitary(alpha float64, x []complex128) { for i, v := range x { x[i] = complex(real(v)*alpha, imag(v)*alpha) @@ -70,11 +75,12 @@ func DscalUnitary(alpha float64, x []complex128) { } // DscalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) -// ix += inc -// } +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } func DscalInc(alpha float64, x []complex128, n, inc uintptr) { var ix uintptr for i := 0; i < int(n); i++ { @@ -84,11 +90,12 @@ func DscalInc(alpha float64, x []complex128, n, inc uintptr) { } // ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } func ScalInc(alpha complex128, x []complex128, n, inc uintptr) { var ix uintptr for i := 0; i < int(n); i++ { @@ -98,9 +105,10 @@ func ScalInc(alpha complex128, x []complex128, n, inc uintptr) { 
} // ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } +// +// for i := range x { +// x[i] *= alpha +// } func ScalUnitary(alpha complex128, x []complex128) { for i := range x { x[i] *= alpha @@ -108,10 +116,11 @@ func ScalUnitary(alpha complex128, x []complex128) { } // DotcUnitary is -// for i, v := range x { -// sum += y[i] * cmplx.Conj(v) -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * cmplx.Conj(v) +// } +// return sum func DotcUnitary(x, y []complex128) (sum complex128) { for i, v := range x { sum += y[i] * cmplx.Conj(v) @@ -120,12 +129,13 @@ func DotcUnitary(x, y []complex128) (sum complex128) { } // DotcInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * cmplx.Conj(x[ix]) -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * cmplx.Conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { for i := 0; i < int(n); i++ { sum += y[iy] * cmplx.Conj(x[ix]) @@ -136,10 +146,11 @@ func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) } // DotuUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * v +// } +// return sum func DotuUnitary(x, y []complex128) (sum complex128) { for i, v := range x { sum += y[i] * v @@ -148,12 +159,13 @@ func DotuUnitary(x, y []complex128) (sum complex128) { } // DotuInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { for i := 0; i < int(n); i++ { sum += y[iy] * x[ix] diff --git a/internal/asm/c64/scal.go b/internal/asm/c64/scal.go index a84def87..6db0aa36 100644 --- a/internal/asm/c64/scal.go +++ b/internal/asm/c64/scal.go @@ -5,9 +5,10 @@ package c64 // ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } +// +// for i := range x { +// x[i] *= alpha +// } func ScalUnitary(alpha complex64, x []complex64) { for i := range x { x[i] *= alpha @@ -15,9 +16,10 @@ func ScalUnitary(alpha complex64, x []complex64) { } // ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } +// +// for i, v := range x { +// dst[i] = alpha * v +// } func ScalUnitaryTo(dst []complex64, alpha complex64, x []complex64) { for i, v := range x { dst[i] = alpha * v @@ -25,11 +27,12 @@ func ScalUnitaryTo(dst []complex64, alpha complex64, x []complex64) { } // ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } func ScalInc(alpha complex64, x []complex64, n, incX uintptr) { var ix uintptr for i := 0; i < int(n); i++ { @@ -39,12 +42,13 @@ func ScalInc(alpha complex64, x []complex64, n, incX uintptr) { } // ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } +// +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } func ScalIncTo(dst []complex64, incDst uintptr, alpha complex64, x []complex64, n, incX uintptr) { var idst, ix uintptr for i := 0; i < int(n); i++ { @@ -55,9 +59,10 @@ func ScalIncTo(dst []complex64, incDst uintptr, alpha 
complex64, x []complex64, } // SscalUnitary is -// for i, v := range x { -// x[i] = complex(real(v)*alpha, imag(v)*alpha) -// } +// +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } func SscalUnitary(alpha float32, x []complex64) { for i, v := range x { x[i] = complex(real(v)*alpha, imag(v)*alpha) @@ -65,11 +70,12 @@ func SscalUnitary(alpha float32, x []complex64) { } // SscalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) -// ix += inc -// } +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } func SscalInc(alpha float32, x []complex64, n, inc uintptr) { var ix uintptr for i := 0; i < int(n); i++ { diff --git a/internal/asm/c64/stubs.go b/internal/asm/c64/stubs.go index b0f47519..0aa626e1 100644 --- a/internal/asm/c64/stubs.go +++ b/internal/asm/c64/stubs.go @@ -10,9 +10,10 @@ import ( ) // Add is -// for i, v := range s { -// dst[i] += v -// } +// +// for i, v := range s { +// dst[i] += v +// } func Add(dst, s []complex64) { for i, v := range s { dst[i] += v @@ -20,9 +21,10 @@ func Add(dst, s []complex64) { } // AddConst is -// for i := range x { -// x[i] += alpha -// } +// +// for i := range x { +// x[i] += alpha +// } func AddConst(alpha complex64, x []complex64) { for i := range x { x[i] += alpha @@ -30,14 +32,15 @@ func AddConst(alpha complex64, x []complex64) { } // CumSum is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] + v -// } -// return dst +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst func CumSum(dst, s []complex64) []complex64 { if len(s) == 0 { return dst @@ -50,14 +53,15 @@ func CumSum(dst, s []complex64) []complex64 { } // CumProd is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] * v -// } -// return dst +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst func CumProd(dst, s []complex64) []complex64 { if len(s) == 0 { return dst @@ -70,9 +74,10 @@ func CumProd(dst, s []complex64) []complex64 { } // Div is -// for i, v := range s { -// dst[i] /= v -// } +// +// for i, v := range s { +// dst[i] /= v +// } func Div(dst, s []complex64) { for i, v := range s { dst[i] /= v @@ -80,10 +85,11 @@ func Div(dst, s []complex64) { } // DivTo is -// for i, v := range s { -// dst[i] = v / t[i] -// } -// return dst +// +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst func DivTo(dst, s, t []complex64) []complex64 { for i, v := range s { dst[i] = v / t[i] @@ -92,10 +98,11 @@ func DivTo(dst, s, t []complex64) []complex64 { } // DotUnitary is -// for i, v := range x { -// sum += conj(v) * y[i] -// } -// return sum +// +// for i, v := range x { +// sum += conj(v) * y[i] +// } +// return sum func DotUnitary(x, y []complex64) (sum complex64) { for i, v := range x { sum += cmplx64.Conj(v) * y[i] @@ -159,10 +166,11 @@ func L2NormUnitary(x []complex64) (norm float32) { } // Sum is -// var sum complex64 -// for i := range x { -// sum += x[i] -// } +// +// var sum complex64 +// for i := range x { +// sum += x[i] +// } func Sum(x []complex64) complex64 { var sum complex64 for _, v := range x { diff --git a/internal/asm/c64/stubs_amd64.go b/internal/asm/c64/stubs_amd64.go index 
2fff60ac..71367b01 100644 --- a/internal/asm/c64/stubs_amd64.go +++ b/internal/asm/c64/stubs_amd64.go @@ -8,62 +8,70 @@ package c64 // AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } +// +// for i, v := range x { +// y[i] += alpha * v +// } func AxpyUnitary(alpha complex64, x, y []complex64) // AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) // AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) // AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } +// +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) // DotcUnitary is -// for i, v := range x { -// sum += y[i] * conj(v) -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * conj(v) +// } +// return sum func DotcUnitary(x, y []complex64) (sum complex64) // DotcInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * conj(x[ix]) -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) // DotuUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * v +// } +// return sum func DotuUnitary(x, y []complex64) (sum complex64) // DotuInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) diff --git a/internal/asm/c64/stubs_noasm.go b/internal/asm/c64/stubs_noasm.go index 6729b7a5..0d79b24f 100644 --- a/internal/asm/c64/stubs_noasm.go +++ b/internal/asm/c64/stubs_noasm.go @@ -8,9 +8,10 @@ package c64 // AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } +// +// for i, v := range x { +// y[i] += alpha * v +// } func AxpyUnitary(alpha complex64, x, y []complex64) { for i, v := range x { y[i] += alpha * v @@ -18,9 +19,10 @@ func AxpyUnitary(alpha complex64, x, y []complex64) { } // AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) { for i, v := range x { dst[i] = alpha*v + y[i] @@ -28,11 +30,12 @@ func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) { } // AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { for i := 0; i < int(n); i++ { y[iy] += alpha * x[ix] @@ -42,12 +45,13 @@ func AxpyInc(alpha complex64, x, y 
[]complex64, n, incX, incY, ix, iy uintptr) { } // AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } +// +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { for i := 0; i < int(n); i++ { dst[idst] = alpha*x[ix] + y[iy] @@ -58,10 +62,11 @@ func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []co } // DotcUnitary is -// for i, v := range x { -// sum += y[i] * conj(v) -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * conj(v) +// } +// return sum func DotcUnitary(x, y []complex64) (sum complex64) { for i, v := range x { sum += y[i] * conj(v) @@ -70,12 +75,13 @@ func DotcUnitary(x, y []complex64) (sum complex64) { } // DotcInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * conj(x[ix]) -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { for i := 0; i < int(n); i++ { sum += y[iy] * conj(x[ix]) @@ -86,10 +92,11 @@ func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { } // DotuUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * v +// } +// return sum func DotuUnitary(x, y []complex64) (sum complex64) { for i, v := range x { sum += y[i] * v @@ -98,12 +105,13 @@ func DotuUnitary(x, y []complex64) (sum complex64) { } // DotuInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { for i := 0; i < int(n); i++ { sum += y[iy] * x[ix] diff --git a/internal/asm/f32/ge_amd64.go b/internal/asm/f32/ge_amd64.go index 5a67f5ff..72acba20 100644 --- a/internal/asm/f32/ge_amd64.go +++ b/internal/asm/f32/ge_amd64.go @@ -8,7 +8,9 @@ package f32 // Ger performs the rank-one operation -// A += alpha * x * yᵀ +// +// A += alpha * x * yᵀ +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Ger(m, n uintptr, alpha float32, x []float32, incX uintptr, diff --git a/internal/asm/f32/ge_noasm.go b/internal/asm/f32/ge_noasm.go index 40e83b31..61ee6f18 100644 --- a/internal/asm/f32/ge_noasm.go +++ b/internal/asm/f32/ge_noasm.go @@ -8,7 +8,9 @@ package f32 // Ger performs the rank-one operation -// A += alpha * x * yᵀ +// +// A += alpha * x * yᵀ +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Ger(m, n uintptr, alpha float32, x []float32, incX uintptr, y []float32, incY uintptr, a []float32, lda uintptr) { diff --git a/internal/asm/f32/gemv.go b/internal/asm/f32/gemv.go index d29b7947..a6000504 100644 --- a/internal/asm/f32/gemv.go +++ b/internal/asm/f32/gemv.go @@ -5,7 +5,9 @@ package f32 // GemvN computes -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 
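As a readability aid, the GemvN operation documented above, y = alpha * A * x + beta * y with A stored row-major with stride lda, can be written directly from its definition. The following is a plain reference sketch for the unit-increment case only, not the internal/asm/f32 kernel.

package main

import "fmt"

// gemvNRef computes y = alpha*A*x + beta*y for an m×n matrix A stored
// row-major in a with stride lda, following the documentation above.
func gemvNRef(m, n int, alpha float32, a []float32, lda int, x []float32, beta float32, y []float32) {
	for i := 0; i < m; i++ {
		var sum float32
		for j := 0; j < n; j++ {
			sum += a[i*lda+j] * x[j]
		}
		y[i] = alpha*sum + beta*y[i]
	}
}

func main() {
	// A = [1 2; 3 4], x = (1, 1), alpha = 1, beta = 0.
	a := []float32{1, 2, 3, 4}
	x := []float32{1, 1}
	y := []float32{1, 1}
	gemvNRef(2, 2, 1, a, 2, x, 0, y)
	fmt.Println(y) // [3 7]
}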
func GemvN(m, n uintptr, alpha float32, a []float32, lda uintptr, x []float32, incX uintptr, beta float32, y []float32, incY uintptr) { var kx, ky, i uintptr @@ -43,7 +45,9 @@ func GemvN(m, n uintptr, alpha float32, a []float32, lda uintptr, x []float32, i } // GemvT computes -// y = alpha * Aᵀ * x + beta * y +// +// y = alpha * Aᵀ * x + beta * y +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. func GemvT(m, n uintptr, alpha float32, a []float32, lda uintptr, x []float32, incX uintptr, beta float32, y []float32, incY uintptr) { var kx, ky, i uintptr diff --git a/internal/asm/f32/scal.go b/internal/asm/f32/scal.go index d0867a46..ad2adee6 100644 --- a/internal/asm/f32/scal.go +++ b/internal/asm/f32/scal.go @@ -5,9 +5,10 @@ package f32 // ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } +// +// for i := range x { +// x[i] *= alpha +// } func ScalUnitary(alpha float32, x []float32) { for i := range x { x[i] *= alpha @@ -15,9 +16,10 @@ func ScalUnitary(alpha float32, x []float32) { } // ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } +// +// for i, v := range x { +// dst[i] = alpha * v +// } func ScalUnitaryTo(dst []float32, alpha float32, x []float32) { for i, v := range x { dst[i] = alpha * v @@ -25,11 +27,12 @@ func ScalUnitaryTo(dst []float32, alpha float32, x []float32) { } // ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } func ScalInc(alpha float32, x []float32, n, incX uintptr) { var ix uintptr for i := 0; i < int(n); i++ { @@ -39,12 +42,13 @@ func ScalInc(alpha float32, x []float32, n, incX uintptr) { } // ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } +// +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } func ScalIncTo(dst []float32, incDst uintptr, alpha float32, x []float32, n, incX uintptr) { var idst, ix uintptr for i := 0; i < int(n); i++ { diff --git a/internal/asm/f32/stubs_amd64.go b/internal/asm/f32/stubs_amd64.go index 2cdd1cf9..2ea05197 100644 --- a/internal/asm/f32/stubs_amd64.go +++ b/internal/asm/f32/stubs_amd64.go @@ -8,70 +8,79 @@ package f32 // AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } +// +// for i, v := range x { +// y[i] += alpha * v +// } func AxpyUnitary(alpha float32, x, y []float32) // AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) // AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) // AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } +// +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) // DdotUnitary is -// for i, v := range x { -// sum += float64(y[i]) * float64(v) -// } 
-// return +// +// for i, v := range x { +// sum += float64(y[i]) * float64(v) +// } +// return func DdotUnitary(x, y []float32) (sum float64) // DdotInc is -// for i := 0; i < int(n); i++ { -// sum += float64(y[iy]) * float64(x[ix]) -// ix += incX -// iy += incY -// } -// return +// +// for i := 0; i < int(n); i++ { +// sum += float64(y[iy]) * float64(x[ix]) +// ix += incX +// iy += incY +// } +// return func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) // DotUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * v +// } +// return sum func DotUnitary(x, y []float32) (sum float32) // DotInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) // Sum is -// var sum float32 -// for _, v := range x { -// sum += v -// } -// return sum +// +// var sum float32 +// for _, v := range x { +// sum += v +// } +// return sum func Sum(x []float32) float32 diff --git a/internal/asm/f32/stubs_noasm.go b/internal/asm/f32/stubs_noasm.go index 319d1406..07b36ff3 100644 --- a/internal/asm/f32/stubs_noasm.go +++ b/internal/asm/f32/stubs_noasm.go @@ -8,9 +8,10 @@ package f32 // AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } +// +// for i, v := range x { +// y[i] += alpha * v +// } func AxpyUnitary(alpha float32, x, y []float32) { for i, v := range x { y[i] += alpha * v @@ -18,9 +19,10 @@ func AxpyUnitary(alpha float32, x, y []float32) { } // AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) { for i, v := range x { dst[i] = alpha*v + y[i] @@ -28,11 +30,12 @@ func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) { } // AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { for i := 0; i < int(n); i++ { y[iy] += alpha * x[ix] @@ -42,12 +45,13 @@ func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { } // AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } +// +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { for i := 0; i < int(n); i++ { dst[idst] = alpha*x[ix] + y[iy] @@ -58,10 +62,11 @@ func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float3 } // DotUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * v +// } +// return sum func DotUnitary(x, y []float32) (sum float32) { for i, v := range x { sum += y[i] * v @@ -70,12 +75,13 @@ func DotUnitary(x, y []float32) (sum float32) { } // DotInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX 
+// iy += incY +// } +// return sum func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) { for i := 0; i < int(n); i++ { sum += y[iy] * x[ix] @@ -86,10 +92,11 @@ func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) { } // DdotUnitary is -// for i, v := range x { -// sum += float64(y[i]) * float64(v) -// } -// return +// +// for i, v := range x { +// sum += float64(y[i]) * float64(v) +// } +// return func DdotUnitary(x, y []float32) (sum float64) { for i, v := range x { sum += float64(y[i]) * float64(v) @@ -98,12 +105,13 @@ func DdotUnitary(x, y []float32) (sum float64) { } // DdotInc is -// for i := 0; i < int(n); i++ { -// sum += float64(y[iy]) * float64(x[ix]) -// ix += incX -// iy += incY -// } -// return +// +// for i := 0; i < int(n); i++ { +// sum += float64(y[iy]) * float64(x[ix]) +// ix += incX +// iy += incY +// } +// return func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) { for i := 0; i < int(n); i++ { sum += float64(y[iy]) * float64(x[ix]) @@ -114,11 +122,12 @@ func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) { } // Sum is -// var sum float32 -// for _, v := range x { -// sum += v -// } -// return sum +// +// var sum float32 +// for _, v := range x { +// sum += v +// } +// return sum func Sum(x []float32) float32 { var sum float32 for _, v := range x { diff --git a/internal/asm/f64/axpy.go b/internal/asm/f64/axpy.go index 4fc9e0eb..2ab8129a 100644 --- a/internal/asm/f64/axpy.go +++ b/internal/asm/f64/axpy.go @@ -8,9 +8,10 @@ package f64 // AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } +// +// for i, v := range x { +// y[i] += alpha * v +// } func AxpyUnitary(alpha float64, x, y []float64) { for i, v := range x { y[i] += alpha * v @@ -18,9 +19,10 @@ func AxpyUnitary(alpha float64, x, y []float64) { } // AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) { for i, v := range x { dst[i] = alpha*v + y[i] @@ -28,11 +30,12 @@ func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) { } // AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { for i := 0; i < int(n); i++ { y[iy] += alpha * x[ix] @@ -42,12 +45,13 @@ func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { } // AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } +// +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { for i := 0; i < int(n); i++ { dst[idst] = alpha*x[ix] + y[iy] diff --git a/internal/asm/f64/dot.go b/internal/asm/f64/dot.go index a6818507..09931644 100644 --- a/internal/asm/f64/dot.go +++ b/internal/asm/f64/dot.go @@ -8,10 +8,11 @@ package f64 // DotUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * v +// } +// return sum func DotUnitary(x, y []float64) (sum float64) { for i, v := range x { sum += y[i] * v @@ -20,12 +21,13 @@ func DotUnitary(x, 
y []float64) (sum float64) { } // DotInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) { for i := 0; i < int(n); i++ { sum += y[iy] * x[ix] diff --git a/internal/asm/f64/ge_amd64.go b/internal/asm/f64/ge_amd64.go index fda70a56..5b042338 100644 --- a/internal/asm/f64/ge_amd64.go +++ b/internal/asm/f64/ge_amd64.go @@ -8,16 +8,22 @@ package f64 // Ger performs the rank-one operation -// A += alpha * x * yᵀ +// +// A += alpha * x * yᵀ +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) // GemvN computes -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) // GemvT computes -// y = alpha * Aᵀ * x + beta * y +// +// y = alpha * Aᵀ * x + beta * y +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) diff --git a/internal/asm/f64/ge_noasm.go b/internal/asm/f64/ge_noasm.go index 0c46515c..e8dee051 100644 --- a/internal/asm/f64/ge_noasm.go +++ b/internal/asm/f64/ge_noasm.go @@ -8,7 +8,9 @@ package f64 // Ger performs the rank-one operation -// A += alpha * x * yᵀ +// +// A += alpha * x * yᵀ +// // where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) { if incX == 1 && incY == 1 { @@ -36,7 +38,9 @@ func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, in } // GemvN computes -// y = alpha * A * x + beta * y +// +// y = alpha * A * x + beta * y +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { var kx, ky, i uintptr @@ -74,7 +78,9 @@ func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, i } // GemvT computes -// y = alpha * Aᵀ * x + beta * y +// +// y = alpha * Aᵀ * x + beta * y +// // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 
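The rank-one update Ger documented above, A += alpha * x * yᵀ, has an equally direct reference form. Again this is a sketch for the unit-increment, row-major case rather than the f64 kernel itself.

package main

import "fmt"

// gerRef applies A += alpha * x * yᵀ to an m×n matrix A stored row-major
// in a with stride lda, as documented above.
func gerRef(m, n int, alpha float64, x, y, a []float64, lda int) {
	for i := 0; i < m; i++ {
		tmp := alpha * x[i]
		for j := 0; j < n; j++ {
			a[i*lda+j] += tmp * y[j]
		}
	}
}

func main() {
	a := make([]float64, 4) // 2×2 zero matrix
	gerRef(2, 2, 1, []float64{1, 2}, []float64{3, 4}, a, 2)
	fmt.Println(a) // [3 4 6 8]
}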
func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { var kx, ky, i uintptr diff --git a/internal/asm/f64/scal.go b/internal/asm/f64/scal.go index 5269e9b0..c95219e1 100644 --- a/internal/asm/f64/scal.go +++ b/internal/asm/f64/scal.go @@ -8,9 +8,10 @@ package f64 // ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } +// +// for i := range x { +// x[i] *= alpha +// } func ScalUnitary(alpha float64, x []float64) { for i := range x { x[i] *= alpha @@ -18,9 +19,10 @@ func ScalUnitary(alpha float64, x []float64) { } // ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } +// +// for i, v := range x { +// dst[i] = alpha * v +// } func ScalUnitaryTo(dst []float64, alpha float64, x []float64) { for i, v := range x { dst[i] = alpha * v @@ -28,11 +30,12 @@ func ScalUnitaryTo(dst []float64, alpha float64, x []float64) { } // ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } func ScalInc(alpha float64, x []float64, n, incX uintptr) { var ix uintptr for i := 0; i < int(n); i++ { @@ -42,12 +45,13 @@ func ScalInc(alpha float64, x []float64, n, incX uintptr) { } // ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } +// +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) { var idst, ix uintptr for i := 0; i < int(n); i++ { diff --git a/internal/asm/f64/stubs_amd64.go b/internal/asm/f64/stubs_amd64.go index bb9f4785..7139bedd 100644 --- a/internal/asm/f64/stubs_amd64.go +++ b/internal/asm/f64/stubs_amd64.go @@ -8,246 +8,270 @@ package f64 // L1Norm is -// for _, v := range x { -// sum += math.Abs(v) -// } -// return sum +// +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum func L1Norm(x []float64) (sum float64) // L1NormInc is -// for i := 0; i < n*incX; i += incX { -// sum += math.Abs(x[i]) -// } -// return sum +// +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum func L1NormInc(x []float64, n, incX int) (sum float64) // AddConst is -// for i := range x { -// x[i] += alpha -// } +// +// for i := range x { +// x[i] += alpha +// } func AddConst(alpha float64, x []float64) // Add is -// for i, v := range s { -// dst[i] += v -// } +// +// for i, v := range s { +// dst[i] += v +// } func Add(dst, s []float64) // AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } +// +// for i, v := range x { +// y[i] += alpha * v +// } func AxpyUnitary(alpha float64, x, y []float64) // AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) // AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) // AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } +// +// for i := 0; i < int(n); i++ 
{ +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) // CumSum is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] + v -// } -// return dst +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst func CumSum(dst, s []float64) []float64 // CumProd is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] * v -// } -// return dst +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst func CumProd(dst, s []float64) []float64 // Div is -// for i, v := range s { -// dst[i] /= v -// } +// +// for i, v := range s { +// dst[i] /= v +// } func Div(dst, s []float64) // DivTo is -// for i, v := range s { -// dst[i] = v / t[i] -// } -// return dst +// +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst func DivTo(dst, x, y []float64) []float64 // DotUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum +// +// for i, v := range x { +// sum += y[i] * v +// } +// return sum func DotUnitary(x, y []float64) (sum float64) // DotInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) // L1Dist is -// var norm float64 -// for i, v := range s { -// norm += math.Abs(t[i] - v) -// } -// return norm +// +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm func L1Dist(s, t []float64) float64 // LinfDist is -// var norm float64 -// if len(s) == 0 { -// return 0 -// } -// norm = math.Abs(t[0] - s[0]) -// for i, v := range s[1:] { -// absDiff := math.Abs(t[i+1] - v) -// if absDiff > norm || math.IsNaN(norm) { -// norm = absDiff -// } -// } -// return norm +// +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm func LinfDist(s, t []float64) float64 // ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } +// +// for i := range x { +// x[i] *= alpha +// } func ScalUnitary(alpha float64, x []float64) // ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } +// +// for i, v := range x { +// dst[i] = alpha * v +// } func ScalUnitaryTo(dst []float64, alpha float64, x []float64) // ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } func ScalInc(alpha float64, x []float64, n, incX uintptr) // ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } +// +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) // Sum is -// var sum float64 -// for i 
:= range x { -// sum += x[i] -// } +// +// var sum float64 +// for i := range x { +// sum += x[i] +// } func Sum(x []float64) float64 // L2NormUnitary returns the L2-norm of x. -// var scale float64 -// sumSquares := 1.0 -// for _, v := range x { -// if v == 0 { -// continue -// } -// absxi := math.Abs(v) -// if math.IsNaN(absxi) { -// return math.NaN() -// } -// if scale < absxi { -// s := scale / absxi -// sumSquares = 1 + sumSquares*s*s -// scale = absxi -// } else { -// s := absxi / scale -// sumSquares += s * s -// } -// if math.IsInf(scale, 1) { -// return math.Inf(1) -// } -// } -// return scale * math.Sqrt(sumSquares) +// +// var scale float64 +// sumSquares := 1.0 +// for _, v := range x { +// if v == 0 { +// continue +// } +// absxi := math.Abs(v) +// if math.IsNaN(absxi) { +// return math.NaN() +// } +// if scale < absxi { +// s := scale / absxi +// sumSquares = 1 + sumSquares*s*s +// scale = absxi +// } else { +// s := absxi / scale +// sumSquares += s * s +// } +// if math.IsInf(scale, 1) { +// return math.Inf(1) +// } +// } +// return scale * math.Sqrt(sumSquares) func L2NormUnitary(x []float64) (norm float64) // L2NormInc returns the L2-norm of x. -// var scale float64 -// sumSquares := 1.0 -// for ix := uintptr(0); ix < n*incX; ix += incX { -// val := x[ix] -// if val == 0 { -// continue -// } -// absxi := math.Abs(val) -// if math.IsNaN(absxi) { -// return math.NaN() -// } -// if scale < absxi { -// s := scale / absxi -// sumSquares = 1 + sumSquares*s*s -// scale = absxi -// } else { -// s := absxi / scale -// sumSquares += s * s -// } -// } -// if math.IsInf(scale, 1) { -// return math.Inf(1) -// } -// return scale * math.Sqrt(sumSquares) +// +// var scale float64 +// sumSquares := 1.0 +// for ix := uintptr(0); ix < n*incX; ix += incX { +// val := x[ix] +// if val == 0 { +// continue +// } +// absxi := math.Abs(val) +// if math.IsNaN(absxi) { +// return math.NaN() +// } +// if scale < absxi { +// s := scale / absxi +// sumSquares = 1 + sumSquares*s*s +// scale = absxi +// } else { +// s := absxi / scale +// sumSquares += s * s +// } +// } +// if math.IsInf(scale, 1) { +// return math.Inf(1) +// } +// return scale * math.Sqrt(sumSquares) func L2NormInc(x []float64, n, incX uintptr) (norm float64) // L2DistanceUnitary returns the L2-norm of x-y. 
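The L2NormUnitary pseudocode quoted above keeps a running scale so that intermediate squares neither overflow nor underflow. Transcribed into a runnable function, it is a direct rendering of that comment, not the assembly implementation.

package main

import (
	"fmt"
	"math"
)

// l2Norm follows the scale/sumSquares recurrence documented for
// L2NormUnitary: the largest magnitude seen so far is factored out of the
// sum of squares, and the sum is rescaled whenever a larger value appears.
func l2Norm(x []float64) float64 {
	var scale float64
	sumSquares := 1.0
	for _, v := range x {
		if v == 0 {
			continue
		}
		absxi := math.Abs(v)
		if math.IsNaN(absxi) {
			return math.NaN()
		}
		if scale < absxi {
			s := scale / absxi
			sumSquares = 1 + sumSquares*s*s
			scale = absxi
		} else {
			s := absxi / scale
			sumSquares += s * s
		}
		if math.IsInf(scale, 1) {
			return math.Inf(1)
		}
	}
	return scale * math.Sqrt(sumSquares)
}

func main() {
	// A naive sum of squares would overflow here; the scaled form does not.
	fmt.Println(l2Norm([]float64{3e300, 4e300})) // 5e+300
}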
-// var scale float64 -// sumSquares := 1.0 -// for i, v := range x { -// v -= y[i] -// if v == 0 { -// continue -// } -// absxi := math.Abs(v) -// if math.IsNaN(absxi) { -// return math.NaN() -// } -// if scale < absxi { -// s := scale / absxi -// sumSquares = 1 + sumSquares*s*s -// scale = absxi -// } else { -// s := absxi / scale -// sumSquares += s * s -// } -// } -// if math.IsInf(scale, 1) { -// return math.Inf(1) -// } -// return scale * math.Sqrt(sumSquares) +// +// var scale float64 +// sumSquares := 1.0 +// for i, v := range x { +// v -= y[i] +// if v == 0 { +// continue +// } +// absxi := math.Abs(v) +// if math.IsNaN(absxi) { +// return math.NaN() +// } +// if scale < absxi { +// s := scale / absxi +// sumSquares = 1 + sumSquares*s*s +// scale = absxi +// } else { +// s := absxi / scale +// sumSquares += s * s +// } +// } +// if math.IsInf(scale, 1) { +// return math.Inf(1) +// } +// return scale * math.Sqrt(sumSquares) func L2DistanceUnitary(x, y []float64) (norm float64) diff --git a/internal/asm/f64/stubs_noasm.go b/internal/asm/f64/stubs_noasm.go index 6fe9baf1..f0663791 100644 --- a/internal/asm/f64/stubs_noasm.go +++ b/internal/asm/f64/stubs_noasm.go @@ -10,10 +10,11 @@ package f64 import "math" // L1Norm is -// for _, v := range x { -// sum += math.Abs(v) -// } -// return sum +// +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum func L1Norm(x []float64) (sum float64) { for _, v := range x { sum += math.Abs(v) @@ -22,10 +23,11 @@ func L1Norm(x []float64) (sum float64) { } // L1NormInc is -// for i := 0; i < n*incX; i += incX { -// sum += math.Abs(x[i]) -// } -// return sum +// +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum func L1NormInc(x []float64, n, incX int) (sum float64) { for i := 0; i < n*incX; i += incX { sum += math.Abs(x[i]) @@ -34,9 +36,10 @@ func L1NormInc(x []float64, n, incX int) (sum float64) { } // Add is -// for i, v := range s { -// dst[i] += v -// } +// +// for i, v := range s { +// dst[i] += v +// } func Add(dst, s []float64) { for i, v := range s { dst[i] += v @@ -44,9 +47,10 @@ func Add(dst, s []float64) { } // AddConst is -// for i := range x { -// x[i] += alpha -// } +// +// for i := range x { +// x[i] += alpha +// } func AddConst(alpha float64, x []float64) { for i := range x { x[i] += alpha @@ -54,14 +58,15 @@ func AddConst(alpha float64, x []float64) { } // CumSum is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] + v -// } -// return dst +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst func CumSum(dst, s []float64) []float64 { if len(s) == 0 { return dst @@ -74,14 +79,15 @@ func CumSum(dst, s []float64) []float64 { } // CumProd is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] * v -// } -// return dst +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst func CumProd(dst, s []float64) []float64 { if len(s) == 0 { return dst @@ -94,9 +100,10 @@ func CumProd(dst, s []float64) []float64 { } // Div is -// for i, v := range s { -// dst[i] /= v -// } +// +// for i, v := range s { +// dst[i] /= v +// } func Div(dst, s []float64) { for i, v := range s { dst[i] /= v @@ -104,10 +111,11 @@ func Div(dst, s []float64) { } // DivTo is -// for i, v := range s { -// dst[i] = v / t[i] -// } -// 
return dst +// +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst func DivTo(dst, s, t []float64) []float64 { for i, v := range s { dst[i] = v / t[i] @@ -116,11 +124,12 @@ func DivTo(dst, s, t []float64) []float64 { } // L1Dist is -// var norm float64 -// for i, v := range s { -// norm += math.Abs(t[i] - v) -// } -// return norm +// +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm func L1Dist(s, t []float64) float64 { var norm float64 for i, v := range s { @@ -130,18 +139,19 @@ func L1Dist(s, t []float64) float64 { } // LinfDist is -// var norm float64 -// if len(s) == 0 { -// return 0 -// } -// norm = math.Abs(t[0] - s[0]) -// for i, v := range s[1:] { -// absDiff := math.Abs(t[i+1] - v) -// if absDiff > norm || math.IsNaN(norm) { -// norm = absDiff -// } -// } -// return norm +// +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm func LinfDist(s, t []float64) float64 { var norm float64 if len(s) == 0 { @@ -158,10 +168,11 @@ func LinfDist(s, t []float64) float64 { } // Sum is -// var sum float64 -// for i := range x { -// sum += x[i] -// } +// +// var sum float64 +// for i := range x { +// sum += x[i] +// } func Sum(x []float64) float64 { var sum float64 for _, v := range x { diff --git a/internal/cmplx64/cmath_test.go b/internal/cmplx64/cmath_test.go index 688bbeb8..eb216b8f 100644 --- a/internal/cmplx64/cmath_test.go +++ b/internal/cmplx64/cmath_test.go @@ -17,6 +17,7 @@ import ( // The higher-precision values in vc26 were used to derive the // input arguments vc (see also comment below). For reference // only (do not delete). +// //lint:ignore U1000 See comment above. var vc26 = []complex64{ (4.97901192488367350108546816 + 7.73887247457810456552351752i), diff --git a/internal/cmplx64/isnan.go b/internal/cmplx64/isnan.go index 7e0bf788..d6d43dbd 100644 --- a/internal/cmplx64/isnan.go +++ b/internal/cmplx64/isnan.go @@ -22,7 +22,7 @@ func IsNaN(x complex64) bool { return false } -// NaN returns a complex ``not-a-number'' value. +// NaN returns a complex “not-a-number” value. func NaN() complex64 { nan := math.NaN() return complex(nan, nan) diff --git a/internal/math32/math.go b/internal/math32/math.go index f428d7b7..5e92f3d0 100644 --- a/internal/math32/math.go +++ b/internal/math32/math.go @@ -24,6 +24,7 @@ const ( // Abs returns the absolute value of x. // // Special cases are: +// // Abs(±Inf) = +Inf // Abs(NaN) = NaN func Abs(x float32) float32 { @@ -47,6 +48,7 @@ func Copysign(x, y float32) float32 { // unnecessary overflow and underflow. // // Special cases are: +// // Hypot(±Inf, q) = +Inf // Hypot(p, ±Inf) = +Inf // Hypot(NaN, q) = NaN @@ -98,7 +100,7 @@ func IsInf(f float32, sign int) bool { return sign >= 0 && f > math.MaxFloat32 || sign <= 0 && f < -math.MaxFloat32 } -// IsNaN reports whether f is an IEEE 754 ``not-a-number'' value. +// IsNaN reports whether f is an IEEE 754 “not-a-number” value. func IsNaN(f float32) (is bool) { // IEEE 754 says that only NaNs satisfy f != f. // To avoid the floating-point hardware, could use: @@ -110,6 +112,7 @@ func IsNaN(f float32) (is bool) { // Max returns the larger of x or y. 
// // Special cases are: +// // Max(x, +Inf) = Max(+Inf, x) = +Inf // Max(x, NaN) = Max(NaN, x) = NaN // Max(+0, ±0) = Max(±0, +0) = +0 @@ -136,6 +139,7 @@ func Max(x, y float32) float32 { // Min returns the smaller of x or y. // // Special cases are: +// // Min(x, -Inf) = Min(-Inf, x) = -Inf // Min(x, NaN) = Min(NaN, x) = NaN // Min(-0, ±0) = Min(±0, -0) = -0 @@ -158,5 +162,5 @@ func Min(x, y float32) float32 { return y } -// NaN returns an IEEE 754 ``not-a-number'' value. +// NaN returns an IEEE 754 “not-a-number” value. func NaN() float32 { return math.Float32frombits(unan) } diff --git a/internal/math32/math_test.go b/internal/math32/math_test.go index aea3ab36..fabe1593 100644 --- a/internal/math32/math_test.go +++ b/internal/math32/math_test.go @@ -178,64 +178,66 @@ func TestSqrt(t *testing.T) { // // __ieee754_sqrt(x) // Return correctly rounded sqrt. -// ----------------------------------------- -// | Use the hardware sqrt if you have one | -// ----------------------------------------- +// +// ----------------------------------------- +// | Use the hardware sqrt if you have one | +// ----------------------------------------- +// // Method: -// Bit by bit method using integer arithmetic. (Slow, but portable) -// 1. Normalization -// Scale x to y in [1,4) with even powers of 2: -// find an integer k such that 1 <= (y=x*2**(2k)) < 4, then -// sqrt(x) = 2**k * sqrt(y) -// 2. Bit by bit computation -// Let q = sqrt(y) truncated to i bit after binary point (q = 1), -// i 0 -// i+1 2 -// s = 2*q , and y = 2 * ( y - q ). (1) -// i i i i // -// To compute q from q , one checks whether -// i+1 i +// Bit by bit method using integer arithmetic. (Slow, but portable) +// 1. Normalization +// Scale x to y in [1,4) with even powers of 2: +// find an integer k such that 1 <= (y=x*2**(2k)) < 4, then +// sqrt(x) = 2**k * sqrt(y) +// 2. Bit by bit computation +// Let q = sqrt(y) truncated to i bit after binary point (q = 1), +// i 0 +// i+1 2 +// s = 2*q , and y = 2 * ( y - q ). (1) +// i i i i // -// -(i+1) 2 -// (q + 2 ) <= y. (2) -// i -// -(i+1) -// If (2) is false, then q = q ; otherwise q = q + 2 . -// i+1 i i+1 i +// To compute q from q , one checks whether +// i+1 i // -// With some algebraic manipulation, it is not difficult to see -// that (2) is equivalent to -// -(i+1) -// s + 2 <= y (3) -// i i +// -(i+1) 2 +// (q + 2 ) <= y. (2) +// i +// -(i+1) +// If (2) is false, then q = q ; otherwise q = q + 2 . +// i+1 i i+1 i // -// The advantage of (3) is that s and y can be computed by -// i i -// the following recurrence formula: -// if (3) is false +// With some algebraic manipulation, it is not difficult to see +// that (2) is equivalent to +// -(i+1) +// s + 2 <= y (3) +// i i // -// s = s , y = y ; (4) -// i+1 i i+1 i +// The advantage of (3) is that s and y can be computed by +// i i +// the following recurrence formula: +// if (3) is false // -// otherwise, -// -i -(i+1) -// s = s + 2 , y = y - s - 2 (5) -// i+1 i i+1 i i +// s = s , y = y ; (4) +// i+1 i i+1 i // -// One may easily use induction to prove (4) and (5). -// Note. Since the left hand side of (3) contain only i+2 bits, -// it does not necessary to do a full (53-bit) comparison -// in (3). -// 3. Final rounding -// After generating the 53 bits result, we compute one more bit. -// Together with the remainder, we can decide whether the -// result is exact, bigger than 1/2ulp, or less than 1/2ulp -// (it will never equal to 1/2ulp). 
-// The rounding mode can be detected by checking whether -// huge + tiny is equal to huge, and whether huge - tiny is -// equal to huge for some floating point number "huge" and "tiny". +// otherwise, +// -i -(i+1) +// s = s + 2 , y = y - s - 2 (5) +// i+1 i i+1 i i // +// One may easily use induction to prove (4) and (5). +// Note. Since the left hand side of (3) contain only i+2 bits, +// it does not necessary to do a full (53-bit) comparison +// in (3). +// 3. Final rounding +// After generating the 53 bits result, we compute one more bit. +// Together with the remainder, we can decide whether the +// result is exact, bigger than 1/2ulp, or less than 1/2ulp +// (it will never equal to 1/2ulp). +// The rounding mode can be detected by checking whether +// huge + tiny is equal to huge, and whether huge - tiny is +// equal to huge for some floating point number "huge" and "tiny". func sqrt(x float32) float32 { // special cases switch { diff --git a/internal/math32/sqrt.go b/internal/math32/sqrt.go index afa428a1..41f4a134 100644 --- a/internal/math32/sqrt.go +++ b/internal/math32/sqrt.go @@ -14,6 +14,7 @@ import ( // Sqrt returns the square root of x. // // Special cases are: +// // Sqrt(+Inf) = +Inf // Sqrt(±0) = ±0 // Sqrt(x < 0) = NaN diff --git a/internal/math32/sqrt_amd64.go b/internal/math32/sqrt_amd64.go index 05df84ac..eca83f87 100644 --- a/internal/math32/sqrt_amd64.go +++ b/internal/math32/sqrt_amd64.go @@ -14,6 +14,7 @@ package math32 // Sqrt returns the square root of x. // // Special cases are: +// // Sqrt(+Inf) = +Inf // Sqrt(±0) = ±0 // Sqrt(x < 0) = NaN diff --git a/internal/math32/sqrt_arm64.go b/internal/math32/sqrt_arm64.go index 05df84ac..eca83f87 100644 --- a/internal/math32/sqrt_arm64.go +++ b/internal/math32/sqrt_arm64.go @@ -14,6 +14,7 @@ package math32 // Sqrt returns the square root of x. // // Special cases are: +// // Sqrt(+Inf) = +Inf // Sqrt(±0) = ±0 // Sqrt(x < 0) = NaN diff --git a/lapack/gonum/dbdsqr.go b/lapack/gonum/dbdsqr.go index acbf4534..e9c055b3 100644 --- a/lapack/gonum/dbdsqr.go +++ b/lapack/gonum/dbdsqr.go @@ -15,7 +15,9 @@ import ( // Dbdsqr performs a singular value decomposition of a real n×n bidiagonal matrix. // // The SVD of the bidiagonal matrix B is -// B = Q * S * Pᵀ +// +// B = Q * S * Pᵀ +// // where S is a diagonal matrix of singular values, Q is an orthogonal matrix of // left singular vectors, and P is an orthogonal matrix of right singular vectors. // @@ -25,7 +27,8 @@ import ( // // Frequently Dbdsqr is used in conjunction with Dgebrd which reduces a general // matrix A into bidiagonal form. In this case, the SVD of A is -// A = (U * Q) * S * (Pᵀ * VT) +// +// A = (U * Q) * S * (Pᵀ * VT) // // This routine may also compute Qᵀ * C. // diff --git a/lapack/gonum/dgebak.go b/lapack/gonum/dgebak.go index b8c61203..b6af972e 100644 --- a/lapack/gonum/dgebak.go +++ b/lapack/gonum/dgebak.go @@ -10,8 +10,10 @@ import ( ) // Dgebak updates an n×m matrix V as -// V = P D V if side == lapack.EVRight, -// V = P D^{-1} V if side == lapack.EVLeft, +// +// V = P D V if side == lapack.EVRight, +// V = P D^{-1} V if side == lapack.EVLeft, +// // where P and D are n×n permutation and scaling matrices, respectively, // implicitly represented by job, scale, ilo and ihi as returned by Dgebal. 
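Note for reviewers on the pattern behind all of these hunks: a display block inside a doc comment is now set off by blank `//` lines and indented with a tab, which is the form gofmt (Go 1.19+) canonicalizes these comments to and which go doc renders verbatim. As a quick reference, here is the convention on a toy declaration; the package and function below are invented for illustration and do not exist in gonum.

	package toy

	// Scale multiplies each element of x by alpha:
	//
	//	x[i] *= alpha for all i.
	//
	// Scale does nothing when x is empty.
	func Scale(alpha float64, x []float64) {
		for i := range x {
			x[i] *= alpha
		}
	}

This matches the shape of every `+` block in the diff: a blank `//` line, a tab-indented body, and a blank `//` line before the prose resumes.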
// diff --git a/lapack/gonum/dgebal.go b/lapack/gonum/dgebal.go index f4690b50..7623e2fa 100644 --- a/lapack/gonum/dgebal.go +++ b/lapack/gonum/dgebal.go @@ -16,9 +16,11 @@ import ( // // Permuting consists of applying a permutation matrix P such that the matrix // that results from Pᵀ*A*P takes the upper block triangular form -// [ T1 X Y ] -// Pᵀ A P = [ 0 B Z ], -// [ 0 0 T2 ] +// +// [ T1 X Y ] +// Pᵀ A P = [ 0 B Z ], +// [ 0 0 T2 ] +// // where T1 and T2 are upper triangular matrices and B contains at least one // nonzero off-diagonal element in each row and column. The indices ilo and ihi // mark the starting and ending columns of the submatrix B. The eigenvalues of A @@ -28,9 +30,11 @@ import ( // Scaling consists of applying a diagonal similarity transformation D such that // D^{-1}*B*D has the 1-norm of each row and its corresponding column nearly // equal. The output matrix is -// [ T1 X*D Y ] -// [ 0 inv(D)*B*D inv(D)*Z ]. -// [ 0 0 T2 ] +// +// [ T1 X*D Y ] +// [ 0 inv(D)*B*D inv(D)*Z ]. +// [ 0 0 T2 ] +// // Scaling may reduce the 1-norm of the matrix, and improve the accuracy of // the computed eigenvalues and/or eigenvectors. // @@ -41,16 +45,21 @@ import ( // If job is lapack.PermuteScale, both permuting and scaling will be done. // // On return, if job is lapack.Permute or lapack.PermuteScale, it will hold that -// A[i,j] == 0, for i > j and j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}. +// +// A[i,j] == 0, for i > j and j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}. +// // If job is lapack.BalanceNone or lapack.Scale, or if n == 0, it will hold that -// ilo == 0 and ihi == n-1. +// +// ilo == 0 and ihi == n-1. // // On return, scale will contain information about the permutations and scaling // factors applied to A. If π(j) denotes the index of the column interchanged // with column j, and D[j,j] denotes the scaling factor applied to column j, // then -// scale[j] == π(j), for j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}, -// == D[j,j], for j ∈ {ilo, ..., ihi}. +// +// scale[j] == π(j), for j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}, +// == D[j,j], for j ∈ {ilo, ..., ihi}. +// // scale must have length equal to n, otherwise Dgebal will panic. // // Dgebal is an internal routine. It is exported for testing purposes. diff --git a/lapack/gonum/dgebd2.go b/lapack/gonum/dgebd2.go index 6dc97ce5..4f323ec5 100644 --- a/lapack/gonum/dgebd2.go +++ b/lapack/gonum/dgebd2.go @@ -8,7 +8,9 @@ import "gonum.org/v1/gonum/blas" // Dgebd2 reduces an m×n matrix A to upper or lower bidiagonal form by an orthogonal // transformation. -// Qᵀ * A * P = B +// +// Qᵀ * A * P = B +// // if m >= n, B is upper diagonal, otherwise B is lower bidiagonal. // d is the diagonal, len = min(m,n) // e is the off-diagonal len = min(m,n)-1 diff --git a/lapack/gonum/dgebrd.go b/lapack/gonum/dgebrd.go index 8faef130..6b6654ba 100644 --- a/lapack/gonum/dgebrd.go +++ b/lapack/gonum/dgebrd.go @@ -11,7 +11,9 @@ import ( // Dgebrd reduces a general m×n matrix A to upper or lower bidiagonal form B by // an orthogonal transformation: -// Qᵀ * A * P = B. +// +// Qᵀ * A * P = B. +// // The diagonal elements of B are stored in d and the off-diagonal elements are stored // in e. These are additionally stored along the diagonal of A and the off-diagonal // of A. If m >= n B is an upper-bidiagonal matrix, and if m < n B is a @@ -19,27 +21,33 @@ import ( // // The remaining elements of A store the data needed to construct Q and P. // The matrices Q and P are products of elementary reflectors -// if m >= n, Q = H_0 * H_1 * ... 
* H_{n-1}, -// P = G_0 * G_1 * ... * G_{n-2}, -// if m < n, Q = H_0 * H_1 * ... * H_{m-2}, -// P = G_0 * G_1 * ... * G_{m-1}, +// +// if m >= n, Q = H_0 * H_1 * ... * H_{n-1}, +// P = G_0 * G_1 * ... * G_{n-2}, +// if m < n, Q = H_0 * H_1 * ... * H_{m-2}, +// P = G_0 * G_1 * ... * G_{m-1}, +// // where -// H_i = I - tauQ[i] * v_i * v_iᵀ, -// G_i = I - tauP[i] * u_i * u_iᵀ. +// +// H_i = I - tauQ[i] * v_i * v_iᵀ, +// G_i = I - tauP[i] * u_i * u_iᵀ. // // As an example, on exit the entries of A when m = 6, and n = 5 -// [ d e u1 u1 u1] -// [v1 d e u2 u2] -// [v1 v2 d e u3] -// [v1 v2 v3 d e] -// [v1 v2 v3 v4 d] -// [v1 v2 v3 v4 v5] +// +// [ d e u1 u1 u1] +// [v1 d e u2 u2] +// [v1 v2 d e u3] +// [v1 v2 v3 d e] +// [v1 v2 v3 v4 d] +// [v1 v2 v3 v4 v5] +// // and when m = 5, n = 6 -// [ d u1 u1 u1 u1 u1] -// [ e d u2 u2 u2 u2] -// [v1 e d u3 u3 u3] -// [v1 v2 e d u4 u4] -// [v1 v2 v3 e d u5] +// +// [ d u1 u1 u1 u1 u1] +// [ e d u2 u2 u2 u2] +// [v1 e d u3 u3 u3] +// [v1 v2 e d u4 u4] +// [v1 v2 v3 e d u5] // // d, tauQ, and tauP must all have length at least min(m,n), and e must have // length min(m,n) - 1, unless lwork is -1 when there is no check except for diff --git a/lapack/gonum/dgeev.go b/lapack/gonum/dgeev.go index 972e36cb..b49b66fc 100644 --- a/lapack/gonum/dgeev.go +++ b/lapack/gonum/dgeev.go @@ -17,22 +17,30 @@ import ( // // The right eigenvector v_j of A corresponding to an eigenvalue λ_j // is defined by -// A v_j = λ_j v_j, +// +// A v_j = λ_j v_j, +// // and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by -// u_jᴴ A = λ_j u_jᴴ, +// +// u_jᴴ A = λ_j u_jᴴ, +// // where u_jᴴ is the conjugate transpose of u_j. // // On return, A will be overwritten and the left and right eigenvectors will be // stored, respectively, in the columns of the n×n matrices VL and VR in the // same order as their eigenvalues. If the j-th eigenvalue is real, then -// u_j = VL[:,j], -// v_j = VR[:,j], +// +// u_j = VL[:,j], +// v_j = VR[:,j], +// // and if it is not real, then j and j+1 form a complex conjugate pair and the // eigenvectors can be recovered as -// u_j = VL[:,j] + i*VL[:,j+1], -// u_{j+1} = VL[:,j] - i*VL[:,j+1], -// v_j = VR[:,j] + i*VR[:,j+1], -// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// +// u_j = VL[:,j] + i*VL[:,j+1], +// u_{j+1} = VL[:,j] - i*VL[:,j+1], +// v_j = VR[:,j] + i*VR[:,j+1], +// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// // where i is the imaginary unit. The computed eigenvectors are normalized to // have Euclidean norm equal to 1 and largest component real. // diff --git a/lapack/gonum/dgehd2.go b/lapack/gonum/dgehd2.go index 15f45b5e..64b0cb40 100644 --- a/lapack/gonum/dgehd2.go +++ b/lapack/gonum/dgehd2.go @@ -11,9 +11,13 @@ import "gonum.org/v1/gonum/blas" // // The matrix Q is represented as a product of (ihi-ilo) elementary // reflectors -// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// // Each H_i has the form -// H_i = I - tau[i] * v * vᵀ +// +// H_i = I - tau[i] * v * vᵀ +// // where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. // v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. // @@ -26,21 +30,25 @@ import "gonum.org/v1/gonum/blas" // The contents of A are illustrated by the following example, with n = 7, ilo = // 1 and ihi = 5. 
// On entry, -// [ a a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a ] +// +// [ a a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a ] +// // on return, -// [ a a h h h h a ] -// [ a h h h h a ] -// [ h h h h h h ] -// [ v1 h h h h h ] -// [ v1 v2 h h h h ] -// [ v1 v2 v3 h h h ] -// [ a ] +// +// [ a a h h h h a ] +// [ a h h h h a ] +// [ h h h h h h ] +// [ v1 h h h h h ] +// [ v1 v2 h h h h ] +// [ v1 v2 v3 h h h ] +// [ a ] +// // where a denotes an element of the original matrix A, h denotes a // modified element of the upper Hessenberg matrix H, and vi denotes an // element of the vector defining H_i. diff --git a/lapack/gonum/dgehrd.go b/lapack/gonum/dgehrd.go index 2fc75987..ae153302 100644 --- a/lapack/gonum/dgehrd.go +++ b/lapack/gonum/dgehrd.go @@ -15,9 +15,13 @@ import ( // // The matrix Q is represented as a product of (ihi-ilo) elementary // reflectors -// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// // Each H_i has the form -// H_i = I - tau[i] * v * vᵀ +// +// H_i = I - tau[i] * v * vᵀ +// // where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. // v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. // @@ -30,21 +34,25 @@ import ( // The contents of a are illustrated by the following example, with n = 7, ilo = // 1 and ihi = 5. // On entry, -// [ a a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a ] +// +// [ a a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a ] +// // on return, -// [ a a h h h h a ] -// [ a h h h h a ] -// [ h h h h h h ] -// [ v1 h h h h h ] -// [ v1 v2 h h h h ] -// [ v1 v2 v3 h h h ] -// [ a ] +// +// [ a a h h h h a ] +// [ a h h h h a ] +// [ h h h h h h ] +// [ v1 h h h h h ] +// [ v1 v2 h h h h ] +// [ v1 v2 v3 h h h ] +// [ a ] +// // where a denotes an element of the original matrix A, h denotes a // modified element of the upper Hessenberg matrix H, and vi denotes an // element of the vector defining H_i. diff --git a/lapack/gonum/dgels.go b/lapack/gonum/dgels.go index cb0dbe86..496e8a7e 100644 --- a/lapack/gonum/dgels.go +++ b/lapack/gonum/dgels.go @@ -23,6 +23,7 @@ import ( // Aᵀ * X = B. // 4. If m < n and trans == blas.Trans, Dgels finds X such that || A*X - B||_2 // is minimized. +// // Note that the least-squares solutions (cases 1 and 3) perform the minimization // per column of B. This is not the same as finding the minimum-norm matrix. // diff --git a/lapack/gonum/dgeql2.go b/lapack/gonum/dgeql2.go index 73e42d1a..d18989d2 100644 --- a/lapack/gonum/dgeql2.go +++ b/lapack/gonum/dgeql2.go @@ -8,13 +8,19 @@ import "gonum.org/v1/gonum/blas" // Dgeql2 computes the QL factorization of the m×n matrix A. That is, Dgeql2 // computes Q and L such that -// A = Q * L +// +// A = Q * L +// // where Q is an m×m orthonormal matrix and L is a lower trapezoidal matrix. // // Q is represented as a product of elementary reflectors, -// Q = H_{k-1} * ... * H_1 * H_0 +// +// Q = H_{k-1} * ... * H_1 * H_0 +// // where k = min(m,n) and each H_i has the form -// H_i = I - tau[i] * v_i * v_iᵀ +// +// H_i = I - tau[i] * v_i * v_iᵀ +// // Vector v_i has v[m-k+i+1:m] = 0, v[m-k+i] = 1, and v[:m-k+i+1] is stored on // exit in A[0:m-k+i-1, n-k+i]. 
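Several of the routines above (Dgehd2, Dgehrd, Dgeql2, and the QR/RQ factorizations that follow) represent Q only implicitly, as a product of reflectors H_i = I - tau[i] * v_i * v_iᵀ with v_i recovered from a column of A. The sketch below forms one such H densely purely to make that representation concrete; the helper name householder and the sample data are ours, and the library routines never build H explicitly.

	package main

	import "fmt"

	// householder returns H = I - tau*v*vᵀ as a dense n×n row-major matrix.
	func householder(tau float64, v []float64) []float64 {
		n := len(v)
		h := make([]float64, n*n)
		for i := 0; i < n; i++ {
			for j := 0; j < n; j++ {
				h[i*n+j] = -tau * v[i] * v[j]
				if i == j {
					h[i*n+j]++
				}
			}
		}
		return h
	}

	func main() {
		// As in the factorizations above, v[0] = 1 and the remaining entries
		// are the values that would be stored below the diagonal of A.
		v := []float64{1, 0.5, -0.25}
		tau := 2 / (1 + 0.5*0.5 + 0.25*0.25) // tau = 2/(vᵀv) makes H orthogonal and H*v = -v
		h := householder(tau, v)
		for i := 0; i < 3; i++ {
			fmt.Println(h[3*i : 3*i+3])
		}
	}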
// diff --git a/lapack/gonum/dgeqp3.go b/lapack/gonum/dgeqp3.go index d072d288..f96f03be 100644 --- a/lapack/gonum/dgeqp3.go +++ b/lapack/gonum/dgeqp3.go @@ -13,9 +13,13 @@ import ( // m×n matrix A: A*P = Q*R using Level 3 BLAS. // // The matrix Q is represented as a product of elementary reflectors -// Q = H_0 H_1 . . . H_{k-1}, where k = min(m,n). +// +// Q = H_0 H_1 . . . H_{k-1}, where k = min(m,n). +// // Each H_i has the form -// H_i = I - tau * v * vᵀ +// +// H_i = I - tau * v * vᵀ +// // where tau and v are real vectors with v[0:i-1] = 0 and v[i] = 1; // v[i:m] is stored on exit in A[i:m, i], and tau in tau[i]. // diff --git a/lapack/gonum/dgeqr2.go b/lapack/gonum/dgeqr2.go index 57bf3772..c02f8a12 100644 --- a/lapack/gonum/dgeqr2.go +++ b/lapack/gonum/dgeqr2.go @@ -19,9 +19,11 @@ import "gonum.org/v1/gonum/blas" // // The ith elementary reflector can be explicitly constructed by first extracting // the -// v[j] = 0 j < i -// v[j] = 1 j == i -// v[j] = a[j*lda+i] j > i +// +// v[j] = 0 j < i +// v[j] = 1 j == i +// v[j] = a[j*lda+i] j > i +// // and computing H_i = I - tau[i] * v * vᵀ. // // The orthonormal matrix Q can be constructed from a product of these elementary diff --git a/lapack/gonum/dgerq2.go b/lapack/gonum/dgerq2.go index a06dec5a..44ca1bc1 100644 --- a/lapack/gonum/dgerq2.go +++ b/lapack/gonum/dgerq2.go @@ -7,7 +7,9 @@ package gonum import "gonum.org/v1/gonum/blas" // Dgerq2 computes an RQ factorization of the m×n matrix A, -// A = R * Q. +// +// A = R * Q. +// // On exit, if m <= n, the upper triangle of the subarray // A[0:m, n-m:n] contains the m×m upper triangular matrix R. // If m >= n, the elements on and above the (m-n)-th subdiagonal @@ -17,9 +19,13 @@ import "gonum.org/v1/gonum/blas" // reflectors. // // The matrix Q is represented as a product of elementary reflectors -// Q = H_0 H_1 . . . H_{min(m,n)-1}. +// +// Q = H_0 H_1 . . . H_{min(m,n)-1}. +// // Each H(i) has the form -// H_i = I - tau_i * v * vᵀ +// +// H_i = I - tau_i * v * vᵀ +// // where v is a vector with v[0:n-k+i-1] stored in A[m-k+i, 0:n-k+i-1], // v[n-k+i:n] = 0 and v[n-k+i] = 1. // diff --git a/lapack/gonum/dgerqf.go b/lapack/gonum/dgerqf.go index 86115548..fe010b47 100644 --- a/lapack/gonum/dgerqf.go +++ b/lapack/gonum/dgerqf.go @@ -10,7 +10,9 @@ import ( ) // Dgerqf computes an RQ factorization of the m×n matrix A, -// A = R * Q. +// +// A = R * Q. +// // On exit, if m <= n, the upper triangle of the subarray // A[0:m, n-m:n] contains the m×m upper triangular matrix R. // If m >= n, the elements on and above the (m-n)-th subdiagonal @@ -20,9 +22,13 @@ import ( // reflectors. // // The matrix Q is represented as a product of elementary reflectors -// Q = H_0 H_1 . . . H_{min(m,n)-1}. +// +// Q = H_0 H_1 . . . H_{min(m,n)-1}. +// // Each H(i) has the form -// H_i = I - tau_i * v * vᵀ +// +// H_i = I - tau_i * v * vᵀ +// // where v is a vector with v[0:n-k+i-1] stored in A[m-k+i, 0:n-k+i-1], // v[n-k+i:n] = 0 and v[n-k+i] = 1. // diff --git a/lapack/gonum/dgesc2.go b/lapack/gonum/dgesc2.go index ec8f04e5..b2201085 100644 --- a/lapack/gonum/dgesc2.go +++ b/lapack/gonum/dgesc2.go @@ -11,17 +11,23 @@ import ( ) // Dgesc2 solves a system of linear equations -// A * x = scale * b +// +// A * x = scale * b +// // with a general n×n matrix A represented by the LU factorization with complete // pivoting -// A = P * L * U * Q +// +// A = P * L * U * Q +// // as computed by Dgetc2. // // On entry, rhs contains the right hand side vector b. On return, it is // overwritten with the solution vector x. 
// // Dgesc2 returns a scale factor -// 0 <= scale <= 1 +// +// 0 <= scale <= 1 +// // chosen to prevent overflow in the solution. // // Dgesc2 is an internal routine. It is exported for testing purposes. diff --git a/lapack/gonum/dgesv.go b/lapack/gonum/dgesv.go index 53ba884d..0be4414c 100644 --- a/lapack/gonum/dgesv.go +++ b/lapack/gonum/dgesv.go @@ -7,12 +7,16 @@ package gonum import "gonum.org/v1/gonum/blas" // Dgesv computes the solution to a real system of linear equations -// A * X = B +// +// A * X = B +// // where A is an n×n matrix and X and B are n×nrhs matrices. // // The LU decomposition with partial pivoting and row interchanges is used to // factor A as -// A = P * L * U +// +// A = P * L * U +// // where P is a permutation matrix, L is unit lower triangular, and U is upper // triangular. On return, the factors L and U are stored in a; the unit diagonal // elements of L are not stored. The row pivot indices that define the diff --git a/lapack/gonum/dgesvd.go b/lapack/gonum/dgesvd.go index fbaef9fb..e0f8040f 100644 --- a/lapack/gonum/dgesvd.go +++ b/lapack/gonum/dgesvd.go @@ -17,7 +17,9 @@ const noSVDO = "dgesvd: not coded for overwrite" // Dgesvd computes the singular value decomposition of the input matrix A. // // The singular value decomposition is -// A = U * Sigma * Vᵀ +// +// A = U * Sigma * Vᵀ +// // where Sigma is an m×n diagonal matrix containing the singular values of A, // U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first // min(m,n) columns of U and V are the left and right singular vectors of A @@ -25,10 +27,12 @@ const noSVDO = "dgesvd: not coded for overwrite" // // jobU and jobVT are options for computing the singular vectors. The behavior // is as follows -// jobU == lapack.SVDAll All m columns of U are returned in u -// jobU == lapack.SVDStore The first min(m,n) columns are returned in u -// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a -// jobU == lapack.SVDNone The columns of U are not computed. +// +// jobU == lapack.SVDAll All m columns of U are returned in u +// jobU == lapack.SVDStore The first min(m,n) columns are returned in u +// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a +// jobU == lapack.SVDNone The columns of U are not computed. +// // The behavior is the same for jobVT and the rows of Vᵀ. At most one of jobU // and jobVT can equal lapack.SVDOverwrite, and Dgesvd will panic otherwise. // diff --git a/lapack/gonum/dgetc2.go b/lapack/gonum/dgetc2.go index 9884193c..41203e9f 100644 --- a/lapack/gonum/dgetc2.go +++ b/lapack/gonum/dgetc2.go @@ -12,7 +12,9 @@ import ( // Dgetc2 computes an LU factorization with complete pivoting of the n×n matrix // A. The factorization has the form -// A = P * L * U * Q, +// +// A = P * L * U * Q, +// // where P and Q are permutation matrices, L is lower triangular with unit // diagonal elements and U is upper triangular. // diff --git a/lapack/gonum/dgetf2.go b/lapack/gonum/dgetf2.go index 63ad72e9..b773f658 100644 --- a/lapack/gonum/dgetf2.go +++ b/lapack/gonum/dgetf2.go @@ -12,7 +12,9 @@ import ( // Dgetf2 computes the LU decomposition of the m×n matrix A. // The LU decomposition is a factorization of a into -// A = P * L * U +// +// A = P * L * U +// // where P is a permutation matrix, L is a unit lower triangular matrix, and // U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored // in place into a. 
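For reviewers who want the A = P * L * U convention described for Dgetf2 and Dgesv above in executable form, here is a deliberately small Gaussian-elimination sketch with partial pivoting. It follows the same storage idea, with L's multipliers overwriting the strictly lower triangle, U the upper triangle, and one pivot recorded per column, but it is an illustration only, not the library routine.

	package main

	import (
		"fmt"
		"math"
	)

	// luSolve factors the n×n row-major matrix a in place as P*L*U and then
	// solves A*x = b, overwriting b with the solution.
	func luSolve(n int, a, b []float64) {
		ipiv := make([]int, n) // row interchanges, as in the real routines
		for k := 0; k < n; k++ {
			// Partial pivoting: pick the largest |a[i][k]| for i >= k.
			p := k
			for i := k + 1; i < n; i++ {
				if math.Abs(a[i*n+k]) > math.Abs(a[p*n+k]) {
					p = i
				}
			}
			ipiv[k] = p
			if p != k {
				for j := 0; j < n; j++ {
					a[k*n+j], a[p*n+j] = a[p*n+j], a[k*n+j]
				}
				b[k], b[p] = b[p], b[k]
			}
			// Eliminate below the pivot; the multipliers stored in place are L.
			for i := k + 1; i < n; i++ {
				a[i*n+k] /= a[k*n+k]
				for j := k + 1; j < n; j++ {
					a[i*n+j] -= a[i*n+k] * a[k*n+j]
				}
				b[i] -= a[i*n+k] * b[k]
			}
		}
		// Back substitution with the U factor.
		for i := n - 1; i >= 0; i-- {
			for j := i + 1; j < n; j++ {
				b[i] -= a[i*n+j] * b[j]
			}
			b[i] /= a[i*n+i]
		}
	}

	func main() {
		a := []float64{2, 1, 1, 4, -6, 0, -2, 7, 2} // 3×3, row major
		b := []float64{5, -2, 9}
		luSolve(3, a, b)
		fmt.Println(b) // [1 1 2]
	}

Running it prints [1 1 2], the exact solution of the 3×3 system in the example data.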
diff --git a/lapack/gonum/dgetrf.go b/lapack/gonum/dgetrf.go index ad01e71e..35dde05c 100644 --- a/lapack/gonum/dgetrf.go +++ b/lapack/gonum/dgetrf.go @@ -11,7 +11,9 @@ import ( // Dgetrf computes the LU decomposition of the m×n matrix A. // The LU decomposition is a factorization of A into -// A = P * L * U +// +// A = P * L * U +// // where P is a permutation matrix, L is a unit lower triangular matrix, and // U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored // in place into a. diff --git a/lapack/gonum/dgetrs.go b/lapack/gonum/dgetrs.go index 55319345..35b33aa7 100644 --- a/lapack/gonum/dgetrs.go +++ b/lapack/gonum/dgetrs.go @@ -11,8 +11,10 @@ import ( // Dgetrs solves a system of equations using an LU factorization. // The system of equations solved is -// A * X = B if trans == blas.Trans -// Aᵀ * X = B if trans == blas.NoTrans +// +// A * X = B if trans == blas.Trans +// Aᵀ * X = B if trans == blas.NoTrans +// // A is a general n×n matrix with stride lda. B is a general matrix of size n×nrhs. // // On entry b contains the elements of the matrix B. On exit, b contains the diff --git a/lapack/gonum/dggsvd3.go b/lapack/gonum/dggsvd3.go index d5c8be9e..cfe10efa 100644 --- a/lapack/gonum/dggsvd3.go +++ b/lapack/gonum/dggsvd3.go @@ -13,9 +13,11 @@ import ( // Dggsvd3 computes the generalized singular value decomposition (GSVD) // of an m×n matrix A and p×n matrix B: -// Uᵀ*A*Q = D1*[ 0 R ] // -// Vᵀ*B*Q = D2*[ 0 R ] +// Uᵀ*A*Q = D1*[ 0 R ] +// +// Vᵀ*B*Q = D2*[ 0 R ] +// // where U, V and Q are orthogonal matrices. // // Dggsvd3 returns k and l, the dimensions of the sub-blocks. k+l @@ -26,62 +28,69 @@ import ( // // If m-k-l >= 0, // -// k l -// D1 = k [ I 0 ] -// l [ 0 C ] -// m-k-l [ 0 0 ] +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] // -// k l -// D2 = l [ 0 S ] -// p-l [ 0 0 ] +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] // -// n-k-l k l -// [ 0 R ] = k [ 0 R11 R12 ] k -// l [ 0 0 R22 ] l +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l // // where // -// C = diag( alpha_k, ... , alpha_{k+l} ), -// S = diag( beta_k, ... , beta_{k+l} ), -// C^2 + S^2 = I. +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. // // R is stored in -// A[0:k+l, n-k-l:n] +// +// A[0:k+l, n-k-l:n] +// // on exit. // // If m-k-l < 0, // -// k m-k k+l-m -// D1 = k [ I 0 0 ] -// m-k [ 0 C 0 ] +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] // -// k m-k k+l-m -// D2 = m-k [ 0 S 0 ] -// k+l-m [ 0 0 I ] -// p-l [ 0 0 0 ] +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] // -// n-k-l k m-k k+l-m -// [ 0 R ] = k [ 0 R11 R12 R13 ] -// m-k [ 0 0 R22 R23 ] -// k+l-m [ 0 0 0 R33 ] +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] // // where -// C = diag( alpha_k, ... , alpha_m ), -// S = diag( beta_k, ... , beta_m ), -// C^2 + S^2 = I. // -// R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n] -// [ 0 R22 R23 ] +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n] +// [ 0 R22 R23 ] +// // and R33 is stored in -// B[m-k:l, n+m-k-l:n] on exit. +// +// B[m-k:l, n+m-k-l:n] on exit. // // Dggsvd3 computes C, S, R, and optionally the orthogonal transformation // matrices U, V and Q. // // jobU, jobV and jobQ are options for computing the orthogonal matrices. 
The behavior // is as follows -// jobU == lapack.GSVDU Compute orthogonal matrix U -// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// // The behavior is the same for jobV and jobQ with the exception that instead of // lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. // The matrices U, V and Q must be m×m, p×p and n×n respectively unless the @@ -89,17 +98,24 @@ import ( // // alpha and beta must have length n or Dggsvd3 will panic. On exit, alpha and // beta contain the generalized singular value pairs of A and B -// alpha[0:k] = 1, -// beta[0:k] = 0, +// +// alpha[0:k] = 1, +// beta[0:k] = 0, +// // if m-k-l >= 0, -// alpha[k:k+l] = diag(C), -// beta[k:k+l] = diag(S), +// +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// // if m-k-l < 0, -// alpha[k:m]= C, alpha[m:k+l]= 0 -// beta[k:m] = S, beta[m:k+l] = 1. +// +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// // if k+l < n, -// alpha[k+l:n] = 0 and -// beta[k+l:n] = 0. +// +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. // // On exit, iwork contains the permutation required to sort alpha descending. // diff --git a/lapack/gonum/dggsvp3.go b/lapack/gonum/dggsvp3.go index 902260c1..ace13c6c 100644 --- a/lapack/gonum/dggsvp3.go +++ b/lapack/gonum/dggsvp3.go @@ -13,18 +13,18 @@ import ( // Dggsvp3 computes orthogonal matrices U, V and Q such that // -// n-k-l k l -// Uᵀ*A*Q = k [ 0 A12 A13 ] if m-k-l >= 0; -// l [ 0 0 A23 ] -// m-k-l [ 0 0 0 ] +// n-k-l k l +// Uᵀ*A*Q = k [ 0 A12 A13 ] if m-k-l >= 0; +// l [ 0 0 A23 ] +// m-k-l [ 0 0 0 ] // -// n-k-l k l -// Uᵀ*A*Q = k [ 0 A12 A13 ] if m-k-l < 0; -// m-k [ 0 0 A23 ] +// n-k-l k l +// Uᵀ*A*Q = k [ 0 A12 A13 ] if m-k-l < 0; +// m-k [ 0 0 A23 ] // -// n-k-l k l -// Vᵀ*B*Q = l [ 0 0 B13 ] -// p-l [ 0 0 0 ] +// n-k-l k l +// Vᵀ*B*Q = l [ 0 0 B13 ] +// p-l [ 0 0 0 ] // // where the k×k matrix A12 and l×l matrix B13 are non-singular // upper triangular. A23 is l×l upper triangular if m-k-l >= 0, @@ -35,8 +35,10 @@ import ( // // jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior // is as follows -// jobU == lapack.GSVDU Compute orthogonal matrix U -// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// // The behavior is the same for jobV and jobQ with the exception that instead of // lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. // The matrices U, V and Q must be m×m, p×p and n×n respectively unless the @@ -45,8 +47,10 @@ import ( // tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz // iteration procedure. Generally, they are the same as used in the preprocessing // step, for example, -// tola = max(m, n)*norm(A)*eps, -// tolb = max(p, n)*norm(B)*eps. +// +// tola = max(m, n)*norm(A)*eps, +// tolb = max(p, n)*norm(B)*eps. +// // Where eps is the machine epsilon. // // iwork must have length n, work must have length at least max(1, lwork), and diff --git a/lapack/gonum/dgtsv.go b/lapack/gonum/dgtsv.go index e9e8af99..944af1a6 100644 --- a/lapack/gonum/dgtsv.go +++ b/lapack/gonum/dgtsv.go @@ -7,7 +7,9 @@ package gonum import "math" // Dgtsv solves the equation -// A * X = B +// +// A * X = B +// // where A is an n×n tridiagonal matrix. It uses Gaussian elimination with // partial pivoting. 
The equation Aᵀ * X = B may be solved by swapping the // arguments for du and dl. diff --git a/lapack/gonum/dhseqr.go b/lapack/gonum/dhseqr.go index 61390fbd..80fe19bb 100644 --- a/lapack/gonum/dhseqr.go +++ b/lapack/gonum/dhseqr.go @@ -13,14 +13,17 @@ import ( // Dhseqr computes the eigenvalues of an n×n Hessenberg matrix H and, // optionally, the matrices T and Z from the Schur decomposition -// H = Z T Zᵀ, +// +// H = Z T Zᵀ, +// // where T is an n×n upper quasi-triangular matrix (the Schur form), and Z is // the n×n orthogonal matrix of Schur vectors. // // Optionally Z may be postmultiplied into an input orthogonal matrix Q so that // this routine can give the Schur factorization of a matrix A which has been // reduced to the Hessenberg form H by the orthogonal matrix Q: -// A = Q H Qᵀ = (QZ) T (QZ)ᵀ. +// +// A = Q H Qᵀ = (QZ) T (QZ)ᵀ. // // If job == lapack.EigenvaluesOnly, only the eigenvalues will be computed. // If job == lapack.EigenvaluesAndSchur, the eigenvalues and the Schur form T will @@ -38,13 +41,16 @@ import ( // ilo and ihi determine the block of H on which Dhseqr operates. It is assumed // that H is already upper triangular in rows and columns [0:ilo] and [ihi+1:n], // although it will be only checked that the block is isolated, that is, -// ilo == 0 or H[ilo,ilo-1] == 0, -// ihi == n-1 or H[ihi+1,ihi] == 0, +// +// ilo == 0 or H[ilo,ilo-1] == 0, +// ihi == n-1 or H[ihi+1,ihi] == 0, +// // and Dhseqr will panic otherwise. ilo and ihi are typically set by a previous // call to Dgebal, otherwise they should be set to 0 and n-1, respectively. It // must hold that -// 0 <= ilo <= ihi < n if n > 0, -// ilo == 0 and ihi == -1 if n == 0. +// +// 0 <= ilo <= ihi < n if n > 0, +// ilo == 0 and ihi == -1 if n == 0. // // wr and wi must have length n. // @@ -69,15 +75,22 @@ import ( // contain the upper quasi-triangular matrix T from the Schur decomposition (the // Schur form). 2×2 diagonal blocks (corresponding to complex conjugate pairs of // eigenvalues) will be returned in standard form, with -// H[i,i] == H[i+1,i+1], +// +// H[i,i] == H[i+1,i+1], +// // and -// H[i+1,i]*H[i,i+1] < 0. +// +// H[i+1,i]*H[i,i+1] < 0. +// // The eigenvalues will be stored in wr and wi in the same order as on the // diagonal of the Schur form returned in H, with -// wr[i] = H[i,i], +// +// wr[i] = H[i,i], +// // and, if H[i:i+2,i:i+2] is a 2×2 diagonal block, -// wi[i] = sqrt(-H[i+1,i]*H[i,i+1]), -// wi[i+1] = -wi[i]. +// +// wi[i] = sqrt(-H[i+1,i]*H[i,i+1]), +// wi[i+1] = -wi[i]. // // If unconverged == 0 and job == lapack.EigenvaluesOnly, the contents of h // on return is unspecified. @@ -92,30 +105,37 @@ import ( // // If unconverged > 0 and job == lapack.EigenvaluesAndSchur, then on // return -// (initial H) U = U (final H), (*) +// +// (initial H) U = U (final H), (*) +// // where U is an orthogonal matrix. The final H is upper Hessenberg and // H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. // // If unconverged > 0 and compz == lapack.SchurOrig, then on return -// (final Z) = (initial Z) U, +// +// (final Z) = (initial Z) U, +// // where U is the orthogonal matrix in (*) regardless of the value of job. // // If unconverged > 0 and compz == lapack.SchurHess, then on return -// (final Z) = U, +// +// (final Z) = U, +// // where U is the orthogonal matrix in (*) regardless of the value of job. // // References: -// [1] R. Byers. LAPACK 3.1 xHSEQR: Tuning and Implementation Notes on the -// Small Bulge Multi-Shift QR Algorithm with Aggressive Early Deflation. 
-// LAPACK Working Note 187 (2007) -// URL: http://www.netlib.org/lapack/lawnspdf/lawn187.pdf -// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: -// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix -// Anal. Appl. 23(4) (2002), pp. 929—947 -// URL: http://dx.doi.org/10.1137/S0895479801384573 -// [3] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: -// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 -// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +// [1] R. Byers. LAPACK 3.1 xHSEQR: Tuning and Implementation Notes on the +// Small Bulge Multi-Shift QR Algorithm with Aggressive Early Deflation. +// LAPACK Working Note 187 (2007) +// URL: http://www.netlib.org/lapack/lawnspdf/lawn187.pdf +// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: +// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix +// Anal. Appl. 23(4) (2002), pp. 929—947 +// URL: http://dx.doi.org/10.1137/S0895479801384573 +// [3] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 // // Dhseqr is an internal routine. It is exported for testing purposes. func (impl Implementation) Dhseqr(job lapack.SchurJob, compz lapack.SchurComp, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, z []float64, ldz int, work []float64, lwork int) (unconverged int) { diff --git a/lapack/gonum/dlabrd.go b/lapack/gonum/dlabrd.go index 8f7d2949..396242cc 100644 --- a/lapack/gonum/dlabrd.go +++ b/lapack/gonum/dlabrd.go @@ -11,7 +11,9 @@ import ( // Dlabrd reduces the first NB rows and columns of a real general m×n matrix // A to upper or lower bidiagonal form by an orthogonal transformation -// Q**T * A * P +// +// Q**T * A * P +// // If m >= n, A is reduced to upper bidiagonal form and upon exit the elements // on and below the diagonal in the first nb columns represent the elementary // reflectors, and the elements above the diagonal in the first nb rows represent @@ -22,29 +24,37 @@ import ( // elements, and e are the off-diagonal elements. // // The matrices Q and P are products of elementary reflectors -// Q = H_0 * H_1 * ... * H_{nb-1} -// P = G_0 * G_1 * ... * G_{nb-1} +// +// Q = H_0 * H_1 * ... * H_{nb-1} +// P = G_0 * G_1 * ... * G_{nb-1} +// // where -// H_i = I - tauQ[i] * v_i * v_iᵀ -// G_i = I - tauP[i] * u_i * u_iᵀ +// +// H_i = I - tauQ[i] * v_i * v_iᵀ +// G_i = I - tauP[i] * u_i * u_iᵀ // // As an example, on exit the entries of A when m = 6, n = 5, and nb = 2 -// [ 1 1 u1 u1 u1] -// [v1 1 1 u2 u2] -// [v1 v2 a a a] -// [v1 v2 a a a] -// [v1 v2 a a a] -// [v1 v2 a a a] +// +// [ 1 1 u1 u1 u1] +// [v1 1 1 u2 u2] +// [v1 v2 a a a] +// [v1 v2 a a a] +// [v1 v2 a a a] +// [v1 v2 a a a] +// // and when m = 5, n = 6, and nb = 2 -// [ 1 u1 u1 u1 u1 u1] -// [ 1 1 u2 u2 u2 u2] -// [v1 1 a a a a] -// [v1 v2 a a a a] -// [v1 v2 a a a a] +// +// [ 1 u1 u1 u1 u1 u1] +// [ 1 1 u2 u2 u2 u2] +// [v1 1 a a a a] +// [v1 v2 a a a a] +// [v1 v2 a a a a] // // Dlabrd also returns the matrices X and Y which are used with U and V to // apply the transformation to the unreduced part of the matrix -// A := A - V*Yᵀ - X*Uᵀ +// +// A := A - V*Yᵀ - X*Uᵀ +// // and returns the matrices X and Y which are needed to apply the // transformation to the unreduced part of A. 
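The Dhseqr comment above explains how a complex-conjugate eigenvalue pair is read off a standardized 2×2 diagonal block of the real Schur form, with wr[i] = H[i,i] and wi[i] = sqrt(-H[i+1,i]*H[i,i+1]). The sketch below applies exactly that recipe to an invented block, just to make the convention concrete.

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// A hypothetical standardized 2×2 Schur block: equal diagonal entries
		// and off-diagonal entries whose product is negative.
		h00, h01 := 3.0, 2.0
		h10, h11 := -8.0, 3.0
		if h00 != h11 || h01*h10 >= 0 {
			panic("block is not in standard form")
		}
		wr := h00                   // real part: H[i,i] == H[i+1,i+1]
		wi := math.Sqrt(-h10 * h01) // imaginary part: sqrt(-H[i+1,i]*H[i,i+1])
		fmt.Printf("eigenvalues: %g ± %gi\n", wr, wi) // 3 ± 4i
	}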
// diff --git a/lapack/gonum/dlacn2.go b/lapack/gonum/dlacn2.go index d97e5edf..cd6cf719 100644 --- a/lapack/gonum/dlacn2.go +++ b/lapack/gonum/dlacn2.go @@ -17,8 +17,10 @@ import ( // used on the next call. // On the initial call, kase must be 0. // In between calls, x must be overwritten by -// A * X if kase was returned as 1, -// Aᵀ * X if kase was returned as 2, +// +// A * X if kase was returned as 1, +// Aᵀ * X if kase was returned as 2, +// // and all other parameters must not be changed. // On the final return, kase is returned as 0, v contains A*W where W is a // vector, and est = norm(V)/norm(W) is a lower bound for 1-norm of A. diff --git a/lapack/gonum/dlae2.go b/lapack/gonum/dlae2.go index c071fec7..2eda3a18 100644 --- a/lapack/gonum/dlae2.go +++ b/lapack/gonum/dlae2.go @@ -7,8 +7,10 @@ package gonum import "math" // Dlae2 computes the eigenvalues of a 2×2 symmetric matrix -// [a b] -// [b c] +// +// [a b] +// [b c] +// // and returns the eigenvalue with the larger absolute value as rt1 and the // smaller as rt2. // diff --git a/lapack/gonum/dlaev2.go b/lapack/gonum/dlaev2.go index 74d75b91..56923f51 100644 --- a/lapack/gonum/dlaev2.go +++ b/lapack/gonum/dlaev2.go @@ -8,12 +8,15 @@ import "math" // Dlaev2 computes the Eigen decomposition of a symmetric 2×2 matrix. // The matrix is given by -// [a b] -// [b c] +// +// [a b] +// [b c] +// // Dlaev2 returns rt1 and rt2, the eigenvalues of the matrix where |RT1| > |RT2|, // and [cs1, sn1] which is the unit right eigenvalue for RT1. -// [ cs1 sn1] [a b] [cs1 -sn1] = [rt1 0] -// [-sn1 cs1] [b c] [sn1 cs1] [ 0 rt2] +// +// [ cs1 sn1] [a b] [cs1 -sn1] = [rt1 0] +// [-sn1 cs1] [b c] [sn1 cs1] [ 0 rt2] // // Dlaev2 is an internal routine. It is exported for testing purposes. func (impl Implementation) Dlaev2(a, b, c float64) (rt1, rt2, cs1, sn1 float64) { diff --git a/lapack/gonum/dlag2.go b/lapack/gonum/dlag2.go index deeb69db..cd644b65 100644 --- a/lapack/gonum/dlag2.go +++ b/lapack/gonum/dlag2.go @@ -7,12 +7,16 @@ package gonum import "math" // Dlag2 computes the eigenvalues of a 2×2 generalized eigenvalue problem -// A - w*B +// +// A - w*B +// // where B is an upper triangular matrix. // // Dlag2 uses scaling as necessary to avoid over-/underflow. Scaling results in // a modified eigenvalue problem -// s*A - w*B +// +// s*A - w*B +// // where s is a non-negative scaling factor chosen so that w, w*B, and s*A do // not overflow and, if possible, do not underflow, either. // @@ -22,14 +26,14 @@ import "math" // the corresponding exact eigenvalue is sufficiently large. // // If the eigenvalues are real, then: -// - wi is zero, -// - the eigenvalues are wr1/scale1 and wr2/scale2. +// - wi is zero, +// - the eigenvalues are wr1/scale1 and wr2/scale2. // // If the eigenvalues are complex, then: -// - wi is non-negative, -// - the eigenvalues are (wr1 ± wi*i)/scale1, -// - wr1 = wr2, -// - scale1 = scale2. +// - wi is non-negative, +// - the eigenvalues are (wr1 ± wi*i)/scale1, +// - wr1 = wr2, +// - scale1 = scale2. // // Dlag2 assumes that the one-norm of A and B is less than 1/dlamchS. Entries of // A less than sqrt(dlamchS)*norm(A) are subject to being treated as zero. 
The diff --git a/lapack/gonum/dlags2.go b/lapack/gonum/dlags2.go index 1622275d..7bd4f219 100644 --- a/lapack/gonum/dlags2.go +++ b/lapack/gonum/dlags2.go @@ -11,24 +11,28 @@ import "math" // // If upper is true // -// Uᵀ*A*Q = Uᵀ*[ a1 a2 ]*Q = [ x 0 ] -// [ 0 a3 ] [ x x ] +// Uᵀ*A*Q = Uᵀ*[ a1 a2 ]*Q = [ x 0 ] +// [ 0 a3 ] [ x x ] +// // and -// Vᵀ*B*Q = Vᵀ*[ b1 b2 ]*Q = [ x 0 ] -// [ 0 b3 ] [ x x ] +// +// Vᵀ*B*Q = Vᵀ*[ b1 b2 ]*Q = [ x 0 ] +// [ 0 b3 ] [ x x ] // // otherwise // -// Uᵀ*A*Q = Uᵀ*[ a1 0 ]*Q = [ x x ] -// [ a2 a3 ] [ 0 x ] +// Uᵀ*A*Q = Uᵀ*[ a1 0 ]*Q = [ x x ] +// [ a2 a3 ] [ 0 x ] +// // and -// Vᵀ*B*Q = Vᵀ*[ b1 0 ]*Q = [ x x ] -// [ b2 b3 ] [ 0 x ]. +// +// Vᵀ*B*Q = Vᵀ*[ b1 0 ]*Q = [ x x ] +// [ b2 b3 ] [ 0 x ]. // // The rows of the transformed A and B are parallel, where // -// U = [ csu snu ], V = [ csv snv ], Q = [ csq snq ] -// [ -snu csu ] [ -snv csv ] [ -snq csq ] +// U = [ csu snu ], V = [ csv snv ], Q = [ csq snq ] +// [ -snu csu ] [ -snv csv ] [ -snq csq ] // // Dlags2 is an internal routine. It is exported for testing purposes. func (impl Implementation) Dlags2(upper bool, a1, a2, a3, b1, b2, b3 float64) (csu, snu, csv, snv, csq, snq float64) { diff --git a/lapack/gonum/dlagtm.go b/lapack/gonum/dlagtm.go index 64d1fb35..fc8c8eb4 100644 --- a/lapack/gonum/dlagtm.go +++ b/lapack/gonum/dlagtm.go @@ -7,8 +7,10 @@ package gonum import "gonum.org/v1/gonum/blas" // Dlagtm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C if trans == blas.NoTrans -// C = alpha * Aᵀ * B + beta * C if trans == blas.Trans or blas.ConjTrans +// +// C = alpha * A * B + beta * C if trans == blas.NoTrans +// C = alpha * Aᵀ * B + beta * C if trans == blas.Trans or blas.ConjTrans +// // where A is an m×m tridiagonal matrix represented by its diagonals dl, d, du, // B and C are m×n dense matrices, and alpha and beta are scalars. func (impl Implementation) Dlagtm(trans blas.Transpose, m, n int, alpha float64, dl, d, du []float64, b []float64, ldb int, beta float64, c []float64, ldc int) { diff --git a/lapack/gonum/dlahqr.go b/lapack/gonum/dlahqr.go index 00a869bc..13f28560 100644 --- a/lapack/gonum/dlahqr.go +++ b/lapack/gonum/dlahqr.go @@ -19,9 +19,13 @@ import ( // quasi-triangular, although this is not checked. // // It must hold that -// 0 <= ilo <= max(0,ihi), and ihi < n, +// +// 0 <= ilo <= max(0,ihi), and ihi < n, +// // and that -// H[ilo,ilo-1] == 0, if ilo > 0, +// +// H[ilo,ilo-1] == 0, if ilo > 0, +// // otherwise Dlahqr will panic. // // If unconverged is zero on return, wr[ilo:ihi+1] and wi[ilo:ihi+1] will contain @@ -37,7 +41,9 @@ import ( // // z and ldz represent an n×n matrix Z. If wantz is true, the transformations // will be applied to the submatrix Z[iloz:ihiz+1,ilo:ihi+1] and it must hold that -// 0 <= iloz <= ilo, and ihi <= ihiz < n. +// +// 0 <= iloz <= ilo, and ihi <= ihiz < n. +// // If wantz is false, z is not referenced. // // unconverged indicates whether Dlahqr computed all the eigenvalues ilo to ihi @@ -58,7 +64,9 @@ import ( // which have been successfully computed. // // If unconverged is positive and wantt is true, then on return -// (initial H)*U = U*(final H), (*) +// +// (initial H)*U = U*(final H), (*) +// // where U is an orthogonal matrix. The final H is upper Hessenberg and // H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. // @@ -67,7 +75,9 @@ import ( // H[ilo:unconverged,ilo:unconverged]. 
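Aside on the Dlagtm hunk above: the tridiagonal matrix A is passed as its three diagonals dl, d and du rather than as a dense matrix. The sketch below applies that representation to a single vector to show the indexing; the real routine handles n right-hand sides and the alpha/beta scaling, and the function name triMulVec is ours for illustration.

	package main

	import "fmt"

	// triMulVec returns y = A*x where A is m×m tridiagonal with sub-diagonal
	// dl (length m-1), diagonal d (length m) and super-diagonal du (length m-1).
	func triMulVec(dl, d, du, x []float64) []float64 {
		m := len(d)
		y := make([]float64, m)
		for i := 0; i < m; i++ {
			y[i] = d[i] * x[i]
			if i > 0 {
				y[i] += dl[i-1] * x[i-1]
			}
			if i < m-1 {
				y[i] += du[i] * x[i+1]
			}
		}
		return y
	}

	func main() {
		d := []float64{2, 2, 2, 2}
		dl := []float64{-1, -1, -1}
		du := []float64{-1, -1, -1}
		x := []float64{1, 1, 1, 1}
		fmt.Println(triMulVec(dl, d, du, x)) // [1 0 0 1]
	}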
// // If unconverged is positive and wantz is true, then on return -// (final Z) = (initial Z)*U, +// +// (final Z) = (initial Z)*U, +// // where U is the orthogonal matrix in (*) regardless of the value of wantt. // // Dlahqr is an internal routine. It is exported for testing purposes. diff --git a/lapack/gonum/dlahr2.go b/lapack/gonum/dlahr2.go index 43b7308f..59214733 100644 --- a/lapack/gonum/dlahr2.go +++ b/lapack/gonum/dlahr2.go @@ -16,16 +16,21 @@ import ( // also the matrix Y = A * V * T. // // The matrix Q is represented as a product of nb elementary reflectors -// Q = H_0 * H_1 * ... * H_{nb-1}. +// +// Q = H_0 * H_1 * ... * H_{nb-1}. +// // Each H_i has the form -// H_i = I - tau[i] * v * vᵀ, +// +// H_i = I - tau[i] * v * vᵀ, +// // where v is a real vector with v[0:i+k-1] = 0 and v[i+k-1] = 1. v[i+k:n] is // stored on exit in A[i+k+1:n,i]. // // The elements of the vectors v together form the (n-k+1)×nb matrix // V which is needed, with T and Y, to apply the transformation to the // unreduced part of the matrix, using an update of the form -// A = (I - V*T*Vᵀ) * (A - Y*Vᵀ). +// +// A = (I - V*T*Vᵀ) * (A - Y*Vᵀ). // // On entry, a contains the n×(n-k+1) general matrix A. On return, the elements // on and above the k-th subdiagonal in the first nb columns are overwritten @@ -35,13 +40,15 @@ import ( // // The contents of A on exit are illustrated by the following example // with n = 7, k = 3 and nb = 2: -// [ a a a a a ] -// [ a a a a a ] -// [ a a a a a ] -// [ h h a a a ] -// [ v0 h a a a ] -// [ v0 v1 a a a ] -// [ v0 v1 a a a ] +// +// [ a a a a a ] +// [ a a a a a ] +// [ a a a a a ] +// [ h h a a a ] +// [ v0 h a a a ] +// [ v0 v1 a a a ] +// [ v0 v1 a a a ] +// // where a denotes an element of the original matrix A, h denotes a // modified element of the upper Hessenberg matrix H, and vi denotes an // element of the vector defining H_i. diff --git a/lapack/gonum/dlaln2.go b/lapack/gonum/dlaln2.go index df8164a7..54d44398 100644 --- a/lapack/gonum/dlaln2.go +++ b/lapack/gonum/dlaln2.go @@ -7,8 +7,10 @@ package gonum import "math" // Dlaln2 solves a linear equation or a system of 2 linear equations of the form -// (ca A - w D) X = scale B if trans == false, -// (ca Aᵀ - w D) X = scale B if trans == true, +// +// (ca A - w D) X = scale B if trans == false, +// (ca Aᵀ - w D) X = scale B if trans == true, +// // where A is a na×na real matrix, ca is a real scalar, D is a na×na diagonal // real matrix, w is a scalar, real if nw == 1, complex if nw == 2, and X and B // are na×1 matrices, real if w is real, complex if w is complex. diff --git a/lapack/gonum/dlange.go b/lapack/gonum/dlange.go index a04b08d6..3a00dce1 100644 --- a/lapack/gonum/dlange.go +++ b/lapack/gonum/dlange.go @@ -11,10 +11,12 @@ import ( ) // Dlange returns the value of the specified norm of a general m×n matrix A: -// lapack.MaxAbs: the maximum absolute value of any element. -// lapack.MaxColumnSum: the maximum column sum of the absolute values of the elements (1-norm). -// lapack.MaxRowSum: the maximum row sum of the absolute values of the elements (infinity-norm). -// lapack.Frobenius: the square root of the sum of the squares of the elements (Frobenius norm). +// +// lapack.MaxAbs: the maximum absolute value of any element. +// lapack.MaxColumnSum: the maximum column sum of the absolute values of the elements (1-norm). +// lapack.MaxRowSum: the maximum row sum of the absolute values of the elements (infinity-norm). 
+// lapack.Frobenius: the square root of the sum of the squares of the elements (Frobenius norm). +// // If norm == lapack.MaxColumnSum, work must be of length n, and this function will // panic otherwise. There are no restrictions on work for the other matrix norms. func (impl Implementation) Dlange(norm lapack.MatrixNorm, m, n int, a []float64, lda int, work []float64) float64 { diff --git a/lapack/gonum/dlanv2.go b/lapack/gonum/dlanv2.go index f9b5f6f8..360f71b1 100644 --- a/lapack/gonum/dlanv2.go +++ b/lapack/gonum/dlanv2.go @@ -7,8 +7,10 @@ package gonum import "math" // Dlanv2 computes the Schur factorization of a real 2×2 matrix: -// [ a b ] = [ cs -sn ] * [ aa bb ] * [ cs sn ] -// [ c d ] [ sn cs ] [ cc dd ] * [-sn cs ] +// +// [ a b ] = [ cs -sn ] * [ aa bb ] * [ cs sn ] +// [ c d ] [ sn cs ] [ cc dd ] * [-sn cs ] +// // If cc is zero, aa and dd are real eigenvalues of the matrix. Otherwise it // holds that aa = dd and bb*cc < 0, and aa ± sqrt(bb*cc) are complex conjugate // eigenvalues. The real and imaginary parts of the eigenvalues are returned in diff --git a/lapack/gonum/dlapmr.go b/lapack/gonum/dlapmr.go index bd484eaf..73cd82db 100644 --- a/lapack/gonum/dlapmr.go +++ b/lapack/gonum/dlapmr.go @@ -10,9 +10,12 @@ import "gonum.org/v1/gonum/blas/blas64" // k[0],k[1],...,k[m-1] of the integers 0,...,m-1. // // If forward is true, a forward permutation is applied: -// X[k[i],0:n] is moved to X[i,0:n] for i=0,1,...,m-1. +// +// X[k[i],0:n] is moved to X[i,0:n] for i=0,1,...,m-1. +// // If forward is false, a backward permutation is applied: -// X[i,0:n] is moved to X[k[i],0:n] for i=0,1,...,m-1. +// +// X[i,0:n] is moved to X[k[i],0:n] for i=0,1,...,m-1. // // k must have length m, otherwise Dlapmr will panic. func (impl Implementation) Dlapmr(forward bool, m, n int, x []float64, ldx int, k []int) { diff --git a/lapack/gonum/dlapmt.go b/lapack/gonum/dlapmt.go index 55f1567f..4a70e68f 100644 --- a/lapack/gonum/dlapmt.go +++ b/lapack/gonum/dlapmt.go @@ -11,11 +11,11 @@ import "gonum.org/v1/gonum/blas/blas64" // // If forward is true a forward permutation is performed: // -// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. +// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. // // otherwise a backward permutation is performed: // -// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. +// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. // // k must have length n, otherwise Dlapmt will panic. k is zero-indexed. func (impl Implementation) Dlapmt(forward bool, m, n int, x []float64, ldx int, k []int) { diff --git a/lapack/gonum/dlaqr04.go b/lapack/gonum/dlaqr04.go index 9a6da409..3faaa2fc 100644 --- a/lapack/gonum/dlaqr04.go +++ b/lapack/gonum/dlaqr04.go @@ -12,7 +12,9 @@ import ( // Dlaqr04 computes the eigenvalues of a block of an n×n upper Hessenberg matrix // H, and optionally the matrices T and Z from the Schur decomposition -// H = Z T Zᵀ +// +// H = Z T Zᵀ +// // where T is an upper quasi-triangular matrix (the Schur form), and Z is the // orthogonal matrix of Schur vectors. // @@ -24,23 +26,31 @@ import ( // Z[iloz:ihiz+1,ilo:ihi+1], otherwise Z will not be referenced. // // ilo and ihi determine the block of H on which Dlaqr04 operates. 
It must hold that -// 0 <= ilo <= ihi < n if n > 0, -// ilo == 0 and ihi == -1 if n == 0, +// +// 0 <= ilo <= ihi < n if n > 0, +// ilo == 0 and ihi == -1 if n == 0, +// // and the block must be isolated, that is, -// ilo == 0 or H[ilo,ilo-1] == 0, -// ihi == n-1 or H[ihi+1,ihi] == 0, +// +// ilo == 0 or H[ilo,ilo-1] == 0, +// ihi == n-1 or H[ihi+1,ihi] == 0, +// // otherwise Dlaqr04 will panic. // // wr and wi must have length ihi+1. // // iloz and ihiz specify the rows of Z to which transformations will be applied // if wantz is true. It must hold that -// 0 <= iloz <= ilo, and ihi <= ihiz < n, +// +// 0 <= iloz <= ilo, and ihi <= ihiz < n, +// // otherwise Dlaqr04 will panic. // // work must have length at least lwork and lwork must be -// lwork >= 1 if n <= 11, -// lwork >= n if n > 11, +// +// lwork >= 1 if n <= 11, +// lwork >= n if n > 11, +// // otherwise Dlaqr04 will panic. lwork as large as 6*n may be required for // optimal performance. On return, work[0] will contain the optimal value of // lwork. @@ -76,7 +86,9 @@ import ( // eigenvalues which have been successfully computed. Failures are rare. // // If unconverged is positive and wantt is true, then on return -// (initial H)*U = U*(final H), (*) +// +// (initial H)*U = U*(final H), (*) +// // where U is an orthogonal matrix. The final H is upper Hessenberg and // H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. // @@ -85,17 +97,20 @@ import ( // H[ilo:unconverged,ilo:unconverged]. // // If unconverged is positive and wantz is true, then on return -// (final Z) = (initial Z)*U, +// +// (final Z) = (initial Z)*U, +// // where U is the orthogonal matrix in (*) regardless of the value of wantt. // // References: -// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: -// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix -// Anal. Appl. 23(4) (2002), pp. 929—947 -// URL: http://dx.doi.org/10.1137/S0895479801384573 -// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: -// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 -// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: +// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix +// Anal. Appl. 23(4) (2002), pp. 929—947 +// URL: http://dx.doi.org/10.1137/S0895479801384573 +// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 // // Dlaqr04 is an internal routine. It is exported for testing purposes. func (impl Implementation) Dlaqr04(wantt, wantz bool, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, iloz, ihiz int, z []float64, ldz int, work []float64, lwork int, recur int) (unconverged int) { diff --git a/lapack/gonum/dlaqr1.go b/lapack/gonum/dlaqr1.go index e21373bd..c20c88fd 100644 --- a/lapack/gonum/dlaqr1.go +++ b/lapack/gonum/dlaqr1.go @@ -7,7 +7,9 @@ package gonum import "math" // Dlaqr1 sets v to a scalar multiple of the first column of the product -// (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) +// +// (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) +// // where H is a 2×2 or 3×3 matrix, I is the identity matrix of the same size, // and i is the imaginary unit. Scaling is done to avoid overflows and most // underflows. 
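The Dlaqr1 description above can be made concrete for the 2×2 case: with a complex-conjugate shift pair sr ± si*i, the product (H - s1*I)(H - s2*I) equals H² - 2*sr*H + (sr²+si²)*I, so its first column is real. The numbers below are invented, and unlike the real routine this sketch does no scaling to guard against overflow.

	package main

	import "fmt"

	func main() {
		// 2×2 Hessenberg block, row major, with shifts 0.5 ± 1.5i.
		h := [2][2]float64{{1, 2}, {3, 4}}
		sr, si := 0.5, 1.5

		// First column of H² - 2*sr*H + (sr²+si²)*I.
		v0 := h[0][0]*h[0][0] + h[0][1]*h[1][0] - 2*sr*h[0][0] + sr*sr + si*si
		v1 := h[1][0]*h[0][0] + h[1][1]*h[1][0] - 2*sr*h[1][0]
		fmt.Println(v0, v1) // 8.5 12
	}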
diff --git a/lapack/gonum/dlaqr23.go b/lapack/gonum/dlaqr23.go index 58af1e6d..a3fa6661 100644 --- a/lapack/gonum/dlaqr23.go +++ b/lapack/gonum/dlaqr23.go @@ -29,20 +29,28 @@ import ( // // ktop and kbot determine a block [ktop:kbot+1,ktop:kbot+1] along the diagonal // of H. It must hold that -// 0 <= ilo <= ihi < n if n > 0, -// ilo == 0 and ihi == -1 if n == 0, +// +// 0 <= ilo <= ihi < n if n > 0, +// ilo == 0 and ihi == -1 if n == 0, +// // and the block must be isolated, that is, it must hold that -// ktop == 0 or H[ktop,ktop-1] == 0, -// kbot == n-1 or H[kbot+1,kbot] == 0, +// +// ktop == 0 or H[ktop,ktop-1] == 0, +// kbot == n-1 or H[kbot+1,kbot] == 0, +// // otherwise Dlaqr23 will panic. // // nw is the deflation window size. It must hold that -// 0 <= nw <= kbot-ktop+1, +// +// 0 <= nw <= kbot-ktop+1, +// // otherwise Dlaqr23 will panic. // // iloz and ihiz specify the rows of the n×n matrix Z to which transformations // will be applied if wantz is true. It must hold that -// 0 <= iloz <= ktop, and kbot <= ihiz < n, +// +// 0 <= iloz <= ktop, and kbot <= ihiz < n, +// // otherwise Dlaqr23 will panic. // // sr and si must have length kbot+1, otherwise Dlaqr23 will panic. @@ -74,10 +82,10 @@ import ( // stored respectively in sr[kbot-nd+1:kbot+1] and si[kbot-nd+1:kbot+1]. // // References: -// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: -// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl 23(4) (2002), pp. 948—973 -// URL: http://dx.doi.org/10.1137/S0895479801384585 // +// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 func (impl Implementation) Dlaqr23(wantt, wantz bool, n, ktop, kbot, nw int, h []float64, ldh int, iloz, ihiz int, z []float64, ldz int, sr, si []float64, v []float64, ldv int, nh int, t []float64, ldt int, nv int, wv []float64, ldwv int, work []float64, lwork int, recur int) (ns, nd int) { switch { case n < 0: diff --git a/lapack/gonum/dlaqr5.go b/lapack/gonum/dlaqr5.go index 0b0640ee..43b425b8 100644 --- a/lapack/gonum/dlaqr5.go +++ b/lapack/gonum/dlaqr5.go @@ -19,21 +19,25 @@ import ( // // kacc22 specifies the computation mode of far-from-diagonal orthogonal // updates. Permitted values are: -// 0: Dlaqr5 will not accumulate reflections and will not use matrix-matrix -// multiply to update far-from-diagonal matrix entries. -// 1: Dlaqr5 will accumulate reflections and use matrix-matrix multiply to -// update far-from-diagonal matrix entries. -// 2: Dlaqr5 will accumulate reflections, use matrix-matrix multiply to update -// far-from-diagonal matrix entries, and take advantage of 2×2 block -// structure during matrix multiplies. +// +// 0: Dlaqr5 will not accumulate reflections and will not use matrix-matrix +// multiply to update far-from-diagonal matrix entries. +// 1: Dlaqr5 will accumulate reflections and use matrix-matrix multiply to +// update far-from-diagonal matrix entries. +// 2: Dlaqr5 will accumulate reflections, use matrix-matrix multiply to update +// far-from-diagonal matrix entries, and take advantage of 2×2 block +// structure during matrix multiplies. +// // For other values of kacc2 Dlaqr5 will panic. // // n is the order of the Hessenberg matrix H. // // ktop and kbot are indices of the first and last row and column of an isolated // diagonal block upon which the QR sweep will be applied. 
It must hold that -// ktop == 0, or 0 < ktop <= n-1 and H[ktop, ktop-1] == 0, and -// kbot == n-1, or 0 <= kbot < n-1 and H[kbot+1, kbot] == 0, +// +// ktop == 0, or 0 < ktop <= n-1 and H[ktop, ktop-1] == 0, and +// kbot == n-1, or 0 <= kbot < n-1 and H[kbot+1, kbot] == 0, +// // otherwise Dlaqr5 will panic. // // nshfts is the number of simultaneous shifts. It must be positive and even, diff --git a/lapack/gonum/dlarf.go b/lapack/gonum/dlarf.go index 0d851826..16581a1b 100644 --- a/lapack/gonum/dlarf.go +++ b/lapack/gonum/dlarf.go @@ -10,10 +10,14 @@ import ( ) // Dlarf applies an elementary reflector H to an m×n matrix C: -// C = H * C if side == blas.Left -// C = C * H if side == blas.Right +// +// C = H * C if side == blas.Left +// C = C * H if side == blas.Right +// // H is represented in the form -// H = I - tau * v * vᵀ +// +// H = I - tau * v * vᵀ +// // where tau is a scalar and v is a vector. // // work must have length at least m if side == blas.Left and diff --git a/lapack/gonum/dlarfb.go b/lapack/gonum/dlarfb.go index d3ddc8e4..eb43ca74 100644 --- a/lapack/gonum/dlarfb.go +++ b/lapack/gonum/dlarfb.go @@ -13,36 +13,48 @@ import ( // Dlarfb applies a block reflector to a matrix. // // In the call to Dlarfb, the mxn c is multiplied by the implicitly defined matrix h as follows: -// c = h * c if side == Left and trans == NoTrans -// c = c * h if side == Right and trans == NoTrans -// c = hᵀ * c if side == Left and trans == Trans -// c = c * hᵀ if side == Right and trans == Trans +// +// c = h * c if side == Left and trans == NoTrans +// c = c * h if side == Right and trans == NoTrans +// c = hᵀ * c if side == Left and trans == Trans +// c = c * hᵀ if side == Right and trans == Trans +// // h is a product of elementary reflectors. direct sets the direction of multiplication -// h = h_1 * h_2 * ... * h_k if direct == Forward -// h = h_k * h_k-1 * ... * h_1 if direct == Backward +// +// h = h_1 * h_2 * ... * h_k if direct == Forward +// h = h_k * h_k-1 * ... * h_1 if direct == Backward +// // The combination of direct and store defines the orientation of the elementary // reflectors. In all cases the ones on the diagonal are implicitly represented. // // If direct == lapack.Forward and store == lapack.ColumnWise -// V = [ 1 ] -// [v1 1 ] -// [v1 v2 1] -// [v1 v2 v3] -// [v1 v2 v3] +// +// V = [ 1 ] +// [v1 1 ] +// [v1 v2 1] +// [v1 v2 v3] +// [v1 v2 v3] +// // If direct == lapack.Forward and store == lapack.RowWise -// V = [ 1 v1 v1 v1 v1] -// [ 1 v2 v2 v2] -// [ 1 v3 v3] +// +// V = [ 1 v1 v1 v1 v1] +// [ 1 v2 v2 v2] +// [ 1 v3 v3] +// // If direct == lapack.Backward and store == lapack.ColumnWise -// V = [v1 v2 v3] -// [v1 v2 v3] -// [ 1 v2 v3] -// [ 1 v3] -// [ 1] +// +// V = [v1 v2 v3] +// [v1 v2 v3] +// [ 1 v2 v3] +// [ 1 v3] +// [ 1] +// // If direct == lapack.Backward and store == lapack.RowWise -// V = [v1 v1 1 ] -// [v2 v2 v2 1 ] -// [v3 v3 v3 v3 1] +// +// V = [v1 v1 1 ] +// [v2 v2 v2 1 ] +// [v3 v3 v3 v3 1] +// // An elementary reflector can be explicitly constructed by extracting the // corresponding elements of v, placing a 1 where the diagonal would be, and // placing zeros in the remaining elements. diff --git a/lapack/gonum/dlarfg.go b/lapack/gonum/dlarfg.go index 3f614b04..74ad111d 100644 --- a/lapack/gonum/dlarfg.go +++ b/lapack/gonum/dlarfg.go @@ -12,11 +12,15 @@ import ( // Dlarfg generates an elementary reflector for a Householder matrix. 
It creates // a real elementary reflector of order n such that -// H * (alpha) = (beta) -// ( x) ( 0) -// Hᵀ * H = I +// +// H * (alpha) = (beta) +// ( x) ( 0) +// Hᵀ * H = I +// // H is represented in the form -// H = 1 - tau * (1; v) * (1 vᵀ) +// +// H = 1 - tau * (1; v) * (1 vᵀ) +// // where tau is a real scalar. // // On entry, x contains the vector x, on exit it contains v. diff --git a/lapack/gonum/dlarft.go b/lapack/gonum/dlarft.go index 1d84fb5d..921a5a3d 100644 --- a/lapack/gonum/dlarft.go +++ b/lapack/gonum/dlarft.go @@ -12,11 +12,14 @@ import ( // Dlarft forms the triangular factor T of a block reflector H, storing the answer // in t. -// H = I - V * T * Vᵀ if store == lapack.ColumnWise -// H = I - Vᵀ * T * V if store == lapack.RowWise +// +// H = I - V * T * Vᵀ if store == lapack.ColumnWise +// H = I - Vᵀ * T * V if store == lapack.RowWise +// // H is defined by a product of the elementary reflectors where -// H = H_0 * H_1 * ... * H_{k-1} if direct == lapack.Forward -// H = H_{k-1} * ... * H_1 * H_0 if direct == lapack.Backward +// +// H = H_0 * H_1 * ... * H_{k-1} if direct == lapack.Forward +// H = H_{k-1} * ... * H_1 * H_0 if direct == lapack.Backward // // t is a k×k triangular matrix. t is upper triangular if direct = lapack.Forward // and lower triangular otherwise. This function will panic if t is not of diff --git a/lapack/gonum/dlarfx.go b/lapack/gonum/dlarfx.go index a018593f..4e40dad1 100644 --- a/lapack/gonum/dlarfx.go +++ b/lapack/gonum/dlarfx.go @@ -11,7 +11,9 @@ import "gonum.org/v1/gonum/blas" // than 11. // // H is represented in the form -// H = I - tau * v * vᵀ, +// +// H = I - tau * v * vᵀ, +// // where tau is a real scalar and v is a real vector. If tau = 0, then H is // taken to be the identity matrix. // diff --git a/lapack/gonum/dlartg.go b/lapack/gonum/dlartg.go index cdd8ffa7..0239a48e 100644 --- a/lapack/gonum/dlartg.go +++ b/lapack/gonum/dlartg.go @@ -7,15 +7,17 @@ package gonum import "math" // Dlartg generates a plane rotation so that -// [ cs sn] * [f] = [r] -// [-sn cs] [g] = [0] +// +// [ cs sn] * [f] = [r] +// [-sn cs] [g] = [0] +// // where cs*cs + sn*sn = 1. // // This is a more accurate version of BLAS Drotg, with the other differences // that -// - if g = 0, then cs = 1 and sn = 0 -// - if f = 0 and g != 0, then cs = 0 and sn = 1 -// - r takes the sign of f and so cs is always non-negative +// - if g = 0, then cs = 1 and sn = 0 +// - if f = 0 and g != 0, then cs = 0 and sn = 1 +// - r takes the sign of f and so cs is always non-negative // // Dlartg is an internal routine. It is exported for testing purposes. func (impl Implementation) Dlartg(f, g float64) (cs, sn, r float64) { diff --git a/lapack/gonum/dlas2.go b/lapack/gonum/dlas2.go index 9922b4aa..a819fa35 100644 --- a/lapack/gonum/dlas2.go +++ b/lapack/gonum/dlas2.go @@ -7,8 +7,10 @@ package gonum import "math" // Dlas2 computes the singular values of the 2×2 matrix defined by -// [F G] -// [0 H] +// +// [F G] +// [0 H] +// // The smaller and larger singular values are returned in that order. // // Dlas2 is an internal routine. It is exported for testing purposes. diff --git a/lapack/gonum/dlasq2.go b/lapack/gonum/dlasq2.go index 765ae77d..e3870b1d 100644 --- a/lapack/gonum/dlasq2.go +++ b/lapack/gonum/dlasq2.go @@ -22,13 +22,14 @@ import ( // symmetric tridiagonal to which it is similar. // // info returns a status error. The return codes mean as follows: -// 0: The algorithm completed successfully. -// 1: A split was marked by a positive value in e. 
-// 2: Current block of Z not diagonalized after 100*n iterations (in inner -// while loop). On exit Z holds a qd array with the same eigenvalues as -// the given Z. -// 3: Termination criterion of outer while loop not met (program created more -// than N unreduced blocks). +// +// 0: The algorithm completed successfully. +// 1: A split was marked by a positive value in e. +// 2: Current block of Z not diagonalized after 100*n iterations (in inner +// while loop). On exit Z holds a qd array with the same eigenvalues as +// the given Z. +// 3: Termination criterion of outer while loop not met (program created more +// than N unreduced blocks). // // z must have length at least 4*n, and must not contain any negative elements. // Dlasq2 will panic otherwise. diff --git a/lapack/gonum/dlasr.go b/lapack/gonum/dlasr.go index 6bacca5e..3aab41f8 100644 --- a/lapack/gonum/dlasr.go +++ b/lapack/gonum/dlasr.go @@ -17,40 +17,48 @@ import ( // The exact value of P depends on the value of pivot, but in all cases P is // implicitly represented by a series of 2×2 rotation matrices. The entries of // rotation matrix k are defined by s[k] and c[k] -// R(k) = [ c[k] s[k]] -// [-s[k] s[k]] +// +// R(k) = [ c[k] s[k]] +// [-s[k] s[k]] +// // If direct == lapack.Forward, the rotation matrices are applied as // P = P(z-1) * ... * P(2) * P(1), while if direct == lapack.Backward they are // applied as P = P(1) * P(2) * ... * P(n). // // pivot defines the mapping of the elements in R(k) to P(k). // If pivot == lapack.Variable, the rotation is performed for the (k, k+1) plane. -// P(k) = [1 ] -// [ ... ] -// [ 1 ] -// [ c[k] s[k] ] -// [ -s[k] c[k] ] -// [ 1 ] -// [ ... ] -// [ 1] +// +// P(k) = [1 ] +// [ ... ] +// [ 1 ] +// [ c[k] s[k] ] +// [ -s[k] c[k] ] +// [ 1 ] +// [ ... ] +// [ 1] +// // if pivot == lapack.Top, the rotation is performed for the (1, k+1) plane, -// P(k) = [c[k] s[k] ] -// [ 1 ] -// [ ... ] -// [ 1 ] -// [-s[k] c[k] ] -// [ 1 ] -// [ ... ] -// [ 1] +// +// P(k) = [c[k] s[k] ] +// [ 1 ] +// [ ... ] +// [ 1 ] +// [-s[k] c[k] ] +// [ 1 ] +// [ ... ] +// [ 1] +// // and if pivot == lapack.Bottom, the rotation is performed for the (k, z) plane. -// P(k) = [1 ] -// [ ... ] -// [ 1 ] -// [ c[k] s[k]] -// [ 1 ] -// [ ... ] -// [ 1 ] -// [ -s[k] c[k]] +// +// P(k) = [1 ] +// [ ... ] +// [ 1 ] +// [ c[k] s[k]] +// [ 1 ] +// [ ... ] +// [ 1 ] +// [ -s[k] c[k]] +// // s and c have length m - 1 if side == blas.Left, and n - 1 if side == blas.Right. // // Dlasr is an internal routine. It is exported for testing purposes. diff --git a/lapack/gonum/dlassq.go b/lapack/gonum/dlassq.go index 7263219b..3d982c3c 100644 --- a/lapack/gonum/dlassq.go +++ b/lapack/gonum/dlassq.go @@ -8,7 +8,9 @@ import "math" // Dlassq updates a sum of squares represented in scaled form. Dlassq returns // the values scl and smsq such that -// scl^2*smsq = X[0]^2 + ... + X[n-1]^2 + scale^2*sumsq +// +// scl^2*smsq = X[0]^2 + ... + X[n-1]^2 + scale^2*sumsq +// // The value of sumsq is assumed to be non-negative. // // Dlassq is an internal routine. It is exported for testing purposes. diff --git a/lapack/gonum/dlasv2.go b/lapack/gonum/dlasv2.go index 204af193..cc7ceea0 100644 --- a/lapack/gonum/dlasv2.go +++ b/lapack/gonum/dlasv2.go @@ -7,8 +7,10 @@ package gonum import "math" // Dlasv2 computes the singular value decomposition of a 2×2 matrix. 
-// [ csl snl] [f g] [csr -snr] = [ssmax 0] -// [-snl csl] [0 h] [snr csr] = [ 0 ssmin] +// +// [ csl snl] [f g] [csr -snr] = [ssmax 0] +// [-snl csl] [0 h] [snr csr] = [ 0 ssmin] +// // ssmax is the larger absolute singular value, and ssmin is the smaller absolute // singular value. [cls, snl] and [csr, snr] are the left and right singular vectors. // diff --git a/lapack/gonum/dlasy2.go b/lapack/gonum/dlasy2.go index d95402ca..160b68b8 100644 --- a/lapack/gonum/dlasy2.go +++ b/lapack/gonum/dlasy2.go @@ -12,10 +12,12 @@ import ( // Dlasy2 solves the Sylvester matrix equation where the matrices are of order 1 // or 2. It computes the unknown n1×n2 matrix X so that -// TL*X + sgn*X*TR = scale*B if tranl == false and tranr == false, -// TLᵀ*X + sgn*X*TR = scale*B if tranl == true and tranr == false, -// TL*X + sgn*X*TRᵀ = scale*B if tranl == false and tranr == true, -// TLᵀ*X + sgn*X*TRᵀ = scale*B if tranl == true and tranr == true, +// +// TL*X + sgn*X*TR = scale*B if tranl == false and tranr == false, +// TLᵀ*X + sgn*X*TR = scale*B if tranl == true and tranr == false, +// TL*X + sgn*X*TRᵀ = scale*B if tranl == false and tranr == true, +// TLᵀ*X + sgn*X*TRᵀ = scale*B if tranl == true and tranr == true, +// // where TL is n1×n1, TR is n2×n2, B is n1×n2, and 1 <= n1,n2 <= 2. // // isgn must be 1 or -1, and n1 and n2 must be 0, 1, or 2, but these conditions diff --git a/lapack/gonum/dlatbs.go b/lapack/gonum/dlatbs.go index 0d3cf9f0..19300faf 100644 --- a/lapack/gonum/dlatbs.go +++ b/lapack/gonum/dlatbs.go @@ -12,8 +12,10 @@ import ( ) // Dlatbs solves a triangular banded system of equations -// A * x = s*b if trans == blas.NoTrans -// Aᵀ * x = s*b if trans == blas.Trans or blas.ConjTrans +// +// A * x = s*b if trans == blas.NoTrans +// Aᵀ * x = s*b if trans == blas.Trans or blas.ConjTrans +// // where A is an upper or lower triangular band matrix, x and b are n-element // vectors, and s is a scaling factor chosen so that the components of x will be // less than the overflow threshold. diff --git a/lapack/gonum/dlatdf.go b/lapack/gonum/dlatdf.go index d6d99e80..83422912 100644 --- a/lapack/gonum/dlatdf.go +++ b/lapack/gonum/dlatdf.go @@ -12,12 +12,16 @@ import ( ) // Dlatdf computes a contribution to the reciprocal Dif-estimate by solving -// Z * x = h - f +// +// Z * x = h - f +// // and choosing the vector h such that the norm of x is as large as possible. // // The n×n matrix Z is represented by its LU factorization as computed by Dgetc2 // and has the form -// Z = P * L * U * Q +// +// Z = P * L * U * Q +// // where P and Q are permutation matrices, L is lower triangular with unit // diagonal elements and U is upper triangular. // diff --git a/lapack/gonum/dlatrd.go b/lapack/gonum/dlatrd.go index 1e057aa3..195be09c 100644 --- a/lapack/gonum/dlatrd.go +++ b/lapack/gonum/dlatrd.go @@ -11,7 +11,9 @@ import ( // Dlatrd reduces nb rows and columns of a real n×n symmetric matrix A to symmetric // tridiagonal form. It computes the orthonormal similarity transformation -// Qᵀ * A * Q +// +// Qᵀ * A * Q +// // and returns the matrices V and W to apply to the unreduced part of A. If // uplo == blas.Upper, the upper triangle is supplied and the last nb rows are // reduced. If uplo == blas.Lower, the lower triangle is supplied and the first @@ -23,18 +25,20 @@ import ( // set to 1, and the remaining elements contain the data to construct Q. 
// // If uplo == blas.Upper, with n = 5 and nb = 2 on exit a is -// [ a a a v4 v5] -// [ a a v4 v5] -// [ a 1 v5] -// [ d 1] -// [ d] +// +// [ a a a v4 v5] +// [ a a v4 v5] +// [ a 1 v5] +// [ d 1] +// [ d] // // If uplo == blas.Lower, with n = 5 and nb = 2, on exit a is -// [ d ] -// [ 1 d ] -// [v1 1 a ] -// [v1 v2 a a ] -// [v1 v2 a a a] +// +// [ d ] +// [ 1 d ] +// [v1 1 a ] +// [v1 v2 a a ] +// [v1 v2 a a a] // // e contains the superdiagonal elements of the reduced matrix. If uplo == blas.Upper, // e[n-nb:n-1] contains the last nb columns of the reduced matrix, while if @@ -51,18 +55,25 @@ import ( // // The matrix Q is represented as a product of elementary reflectors. Each reflector // H has the form -// I - tau * v * vᵀ +// +// I - tau * v * vᵀ +// // If uplo == blas.Upper, -// Q = H_{n-1} * H_{n-2} * ... * H_{n-nb} +// +// Q = H_{n-1} * H_{n-2} * ... * H_{n-nb} +// // where v[:i-1] is stored in A[:i-1,i], v[i-1] = 1, and v[i:n] = 0. // // If uplo == blas.Lower, -// Q = H_0 * H_1 * ... * H_{nb-1} +// +// Q = H_0 * H_1 * ... * H_{nb-1} +// // where v[:i+1] = 0, v[i+1] = 1, and v[i+2:n] is stored in A[i+2:n,i]. // // The vectors v form the n×nb matrix V which is used with W to apply a // symmetric rank-2 update to the unreduced part of A -// A = A - V * Wᵀ - W * Vᵀ +// +// A = A - V * Wᵀ - W * Vᵀ // // Dlatrd is an internal routine. It is exported for testing purposes. func (impl Implementation) Dlatrd(uplo blas.Uplo, n, nb int, a []float64, lda int, e, tau, w []float64, ldw int) { diff --git a/lapack/gonum/dlatrs.go b/lapack/gonum/dlatrs.go index 73970bcf..37ac2fe7 100644 --- a/lapack/gonum/dlatrs.go +++ b/lapack/gonum/dlatrs.go @@ -13,8 +13,10 @@ import ( // Dlatrs solves a triangular system of equations scaled to prevent overflow. It // solves -// A * x = scale * b if trans == blas.NoTrans -// Aᵀ * x = scale * b if trans == blas.Trans +// +// A * x = scale * b if trans == blas.NoTrans +// Aᵀ * x = scale * b if trans == blas.Trans +// // where the scale s is set for numeric stability. // // A is an n×n triangular matrix. On entry, the slice x contains the values of diff --git a/lapack/gonum/dlauu2.go b/lapack/gonum/dlauu2.go index 24e98ddd..b70a8420 100644 --- a/lapack/gonum/dlauu2.go +++ b/lapack/gonum/dlauu2.go @@ -10,8 +10,10 @@ import ( ) // Dlauu2 computes the product -// U * Uᵀ if uplo is blas.Upper -// Lᵀ * L if uplo is blas.Lower +// +// U * Uᵀ if uplo is blas.Upper +// Lᵀ * L if uplo is blas.Lower +// // where U or L is stored in the upper or lower triangular part of A. // Only the upper or lower triangle of the result is stored, overwriting // the corresponding factor in A. diff --git a/lapack/gonum/dlauum.go b/lapack/gonum/dlauum.go index 995fdc05..575ed7c8 100644 --- a/lapack/gonum/dlauum.go +++ b/lapack/gonum/dlauum.go @@ -10,8 +10,10 @@ import ( ) // Dlauum computes the product -// U * Uᵀ if uplo is blas.Upper -// Lᵀ * L if uplo is blas.Lower +// +// U * Uᵀ if uplo is blas.Upper +// Lᵀ * L if uplo is blas.Lower +// // where U or L is stored in the upper or lower triangular part of A. // Only the upper or lower triangle of the result is stored, overwriting // the corresponding factor in A. diff --git a/lapack/gonum/dorg2l.go b/lapack/gonum/dorg2l.go index a20765a9..fdb37af2 100644 --- a/lapack/gonum/dorg2l.go +++ b/lapack/gonum/dorg2l.go @@ -11,7 +11,9 @@ import ( // Dorg2l generates an m×n matrix Q with orthonormal columns which is defined // as the last n columns of a product of k elementary reflectors of order m. -// Q = H_{k-1} * ... 
* H_1 * H_0 +// +// Q = H_{k-1} * ... * H_1 * H_0 +// // See Dgelqf for more information. It must be that m >= n >= k. // // tau contains the scalar reflectors computed by Dgeqlf. tau must have length diff --git a/lapack/gonum/dorg2r.go b/lapack/gonum/dorg2r.go index de447757..36addec7 100644 --- a/lapack/gonum/dorg2r.go +++ b/lapack/gonum/dorg2r.go @@ -11,7 +11,9 @@ import ( // Dorg2r generates an m×n matrix Q with orthonormal columns defined by the // product of elementary reflectors as computed by Dgeqrf. -// Q = H_0 * H_1 * ... * H_{k-1} +// +// Q = H_0 * H_1 * ... * H_{k-1} +// // len(tau) >= k, 0 <= k <= n, 0 <= n <= m, len(work) >= n. // Dorg2r will panic if these conditions are not met. // diff --git a/lapack/gonum/dorghr.go b/lapack/gonum/dorghr.go index fd65531b..8f0dd452 100644 --- a/lapack/gonum/dorghr.go +++ b/lapack/gonum/dorghr.go @@ -6,7 +6,8 @@ package gonum // Dorghr generates an n×n orthogonal matrix Q which is defined as the product // of ihi-ilo elementary reflectors: -// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. // // a and lda represent an n×n matrix that contains the elementary reflectors, as // returned by Dgehrd. On return, a is overwritten by the n×n orthogonal matrix @@ -15,8 +16,9 @@ package gonum // // ilo and ihi must have the same values as in the previous call of Dgehrd. It // must hold that -// 0 <= ilo <= ihi < n if n > 0, -// ilo = 0, ihi = -1 if n == 0. +// +// 0 <= ilo <= ihi < n if n > 0, +// ilo = 0, ihi = -1 if n == 0. // // tau contains the scalar factors of the elementary reflectors, as returned by // Dgehrd. tau must have length n-1. diff --git a/lapack/gonum/dorgl2.go b/lapack/gonum/dorgl2.go index b5566b9d..ea6dbe52 100644 --- a/lapack/gonum/dorgl2.go +++ b/lapack/gonum/dorgl2.go @@ -11,7 +11,9 @@ import ( // Dorgl2 generates an m×n matrix Q with orthonormal rows defined by the // first m rows product of elementary reflectors as computed by Dgelqf. -// Q = H_0 * H_1 * ... * H_{k-1} +// +// Q = H_0 * H_1 * ... * H_{k-1} +// // len(tau) >= k, 0 <= k <= m, 0 <= m <= n, len(work) >= m. // Dorgl2 will panic if these conditions are not met. // diff --git a/lapack/gonum/dorglq.go b/lapack/gonum/dorglq.go index d7739fb9..1128eb30 100644 --- a/lapack/gonum/dorglq.go +++ b/lapack/gonum/dorglq.go @@ -11,7 +11,9 @@ import ( // Dorglq generates an m×n matrix Q with orthonormal columns defined by the // product of elementary reflectors as computed by Dgelqf. -// Q = H_0 * H_1 * ... * H_{k-1} +// +// Q = H_0 * H_1 * ... * H_{k-1} +// // Dorglq is the blocked version of Dorgl2 that makes greater use of level-3 BLAS // routines. // diff --git a/lapack/gonum/dorgql.go b/lapack/gonum/dorgql.go index 6927ba4c..d5ef17f3 100644 --- a/lapack/gonum/dorgql.go +++ b/lapack/gonum/dorgql.go @@ -11,10 +11,13 @@ import ( // Dorgql generates the m×n matrix Q with orthonormal columns defined as the // last n columns of a product of k elementary reflectors of order m -// Q = H_{k-1} * ... * H_1 * H_0. +// +// Q = H_{k-1} * ... * H_1 * H_0. // // It must hold that -// 0 <= k <= n <= m, +// +// 0 <= k <= n <= m, +// // and Dorgql will panic otherwise. // // On entry, the (n-k+i)-th column of A must contain the vector which defines diff --git a/lapack/gonum/dorgqr.go b/lapack/gonum/dorgqr.go index 7725cf4c..db1a4807 100644 --- a/lapack/gonum/dorgqr.go +++ b/lapack/gonum/dorgqr.go @@ -11,7 +11,9 @@ import ( // Dorgqr generates an m×n matrix Q with orthonormal columns defined by the // product of elementary reflectors -// Q = H_0 * H_1 * ... 
* H_{k-1} +// +// Q = H_0 * H_1 * ... * H_{k-1} +// // as computed by Dgeqrf. // Dorgqr is the blocked version of Dorg2r that makes greater use of level-3 BLAS // routines. diff --git a/lapack/gonum/dorgr2.go b/lapack/gonum/dorgr2.go index 1cdb6e80..6f2790cb 100644 --- a/lapack/gonum/dorgr2.go +++ b/lapack/gonum/dorgr2.go @@ -11,7 +11,9 @@ import ( // Dorgr2 generates an m×n real matrix Q with orthonormal rows, which is defined // as the last m rows of a product of k elementary reflectors of order n -// Q = H_0 * H_1 * ... * H_{k-1} +// +// Q = H_0 * H_1 * ... * H_{k-1} +// // as returned by Dgerqf. // // On entry, the (m-k+i)-th row of A must contain the vector which defines the @@ -22,7 +24,9 @@ import ( // reflector H_i, as returned by Dgerqf. // // It must hold that -// n >= m >= k >= 0, +// +// n >= m >= k >= 0, +// // the length of tau must be k and the length of work must be m, otherwise // Dorgr2 will panic. // diff --git a/lapack/gonum/dorgtr.go b/lapack/gonum/dorgtr.go index 483fbcae..d2cef99a 100644 --- a/lapack/gonum/dorgtr.go +++ b/lapack/gonum/dorgtr.go @@ -10,8 +10,10 @@ import "gonum.org/v1/gonum/blas" // of n-1 elementary reflectors of order n as returned by Dsytrd. // // The construction of Q depends on the value of uplo: -// Q = H_{n-1} * ... * H_1 * H_0 if uplo == blas.Upper -// Q = H_0 * H_1 * ... * H_{n-1} if uplo == blas.Lower +// +// Q = H_{n-1} * ... * H_1 * H_0 if uplo == blas.Upper +// Q = H_0 * H_1 * ... * H_{n-1} if uplo == blas.Lower +// // where H_i is constructed from the elementary reflectors as computed by Dsytrd. // See the documentation for Dsytrd for more information. // diff --git a/lapack/gonum/dorm2r.go b/lapack/gonum/dorm2r.go index 8311f742..e9293131 100644 --- a/lapack/gonum/dorm2r.go +++ b/lapack/gonum/dorm2r.go @@ -8,10 +8,12 @@ import "gonum.org/v1/gonum/blas" // Dorm2r multiplies a general matrix C by an orthogonal matrix from a QR factorization // determined by Dgeqrf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Qᵀ * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Qᵀ * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// // If side == blas.Left, a is a matrix of size m×k, and if side == blas.Right // a is of size n×k. // diff --git a/lapack/gonum/dormbr.go b/lapack/gonum/dormbr.go index 20f3ca57..8be7040c 100644 --- a/lapack/gonum/dormbr.go +++ b/lapack/gonum/dormbr.go @@ -13,15 +13,17 @@ import ( // decomposition computed by Dgebrd. 
// // Dormbr overwrites the m×n matrix C with -// Q * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.NoTrans -// C * Q if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.NoTrans -// Qᵀ * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.Trans -// C * Qᵀ if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.Trans // -// P * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.NoTrans -// C * P if vect == lapack.ApplyP, side == blas.Right, and trans == blas.NoTrans -// Pᵀ * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.Trans -// C * Pᵀ if vect == lapack.ApplyP, side == blas.Right, and trans == blas.Trans +// Q * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.NoTrans +// C * Q if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.NoTrans +// Qᵀ * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.Trans +// C * Qᵀ if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.Trans +// +// P * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.NoTrans +// C * P if vect == lapack.ApplyP, side == blas.Right, and trans == blas.NoTrans +// Pᵀ * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.Trans +// C * Pᵀ if vect == lapack.ApplyP, side == blas.Right, and trans == blas.Trans +// // where P and Q are the orthogonal matrices determined by Dgebrd when reducing // a matrix A to bidiagonal form: A = Q * B * Pᵀ. See Dgebrd for the // definitions of Q and P. diff --git a/lapack/gonum/dormhr.go b/lapack/gonum/dormhr.go index ac4d3ae6..318a57ad 100644 --- a/lapack/gonum/dormhr.go +++ b/lapack/gonum/dormhr.go @@ -7,24 +7,29 @@ package gonum import "gonum.org/v1/gonum/blas" // Dormhr multiplies an m×n general matrix C with an nq×nq orthogonal matrix Q -// Q * C if side == blas.Left and trans == blas.NoTrans, -// Qᵀ * C if side == blas.Left and trans == blas.Trans, -// C * Q if side == blas.Right and trans == blas.NoTrans, -// C * Qᵀ if side == blas.Right and trans == blas.Trans, +// +// Q * C if side == blas.Left and trans == blas.NoTrans, +// Qᵀ * C if side == blas.Left and trans == blas.Trans, +// C * Q if side == blas.Right and trans == blas.NoTrans, +// C * Qᵀ if side == blas.Right and trans == blas.Trans, +// // where nq == m if side == blas.Left and nq == n if side == blas.Right. // // Q is defined implicitly as the product of ihi-ilo elementary reflectors, as // returned by Dgehrd: -// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// // Q is equal to the identity matrix except in the submatrix // Q[ilo+1:ihi+1,ilo+1:ihi+1]. // // ilo and ihi must have the same values as in the previous call of Dgehrd. It // must hold that -// 0 <= ilo <= ihi < m if m > 0 and side == blas.Left, -// ilo = 0 and ihi = -1 if m = 0 and side == blas.Left, -// 0 <= ilo <= ihi < n if n > 0 and side == blas.Right, -// ilo = 0 and ihi = -1 if n = 0 and side == blas.Right. +// +// 0 <= ilo <= ihi < m if m > 0 and side == blas.Left, +// ilo = 0 and ihi = -1 if m = 0 and side == blas.Left, +// 0 <= ilo <= ihi < n if n > 0 and side == blas.Right, +// ilo = 0 and ihi = -1 if n = 0 and side == blas.Right. // // a and lda represent an m×m matrix if side == blas.Left and an n×n matrix if // side == blas.Right. 
The matrix contains vectors which define the elementary diff --git a/lapack/gonum/dorml2.go b/lapack/gonum/dorml2.go index df474ca4..665e2102 100644 --- a/lapack/gonum/dorml2.go +++ b/lapack/gonum/dorml2.go @@ -8,10 +8,12 @@ import "gonum.org/v1/gonum/blas" // Dorml2 multiplies a general matrix C by an orthogonal matrix from an LQ factorization // determined by Dgelqf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Qᵀ * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Qᵀ * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// // If side == blas.Left, a is a matrix of side k×m, and if side == blas.Right // a is of size k×n. // diff --git a/lapack/gonum/dormlq.go b/lapack/gonum/dormlq.go index a86a8a56..37b49973 100644 --- a/lapack/gonum/dormlq.go +++ b/lapack/gonum/dormlq.go @@ -11,10 +11,12 @@ import ( // Dormlq multiplies the matrix C by the orthogonal matrix Q defined by the // slices a and tau. A and tau are as returned from Dgelqf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Qᵀ * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Qᵀ * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// // If side == blas.Left, A is a matrix of side k×m, and if side == blas.Right // A is of size k×n. This uses a blocked algorithm. // diff --git a/lapack/gonum/dormqr.go b/lapack/gonum/dormqr.go index ae67de5f..c1e5668b 100644 --- a/lapack/gonum/dormqr.go +++ b/lapack/gonum/dormqr.go @@ -10,12 +10,15 @@ import ( ) // Dormqr multiplies an m×n matrix C by an orthogonal matrix Q as -// C = Q * C if side == blas.Left and trans == blas.NoTrans, -// C = Qᵀ * C if side == blas.Left and trans == blas.Trans, -// C = C * Q if side == blas.Right and trans == blas.NoTrans, -// C = C * Qᵀ if side == blas.Right and trans == blas.Trans, +// +// C = Q * C if side == blas.Left and trans == blas.NoTrans, +// C = Qᵀ * C if side == blas.Left and trans == blas.Trans, +// C = C * Q if side == blas.Right and trans == blas.NoTrans, +// C = C * Qᵀ if side == blas.Right and trans == blas.Trans, +// // where Q is defined as the product of k elementary reflectors -// Q = H_0 * H_1 * ... * H_{k-1}. +// +// Q = H_0 * H_1 * ... * H_{k-1}. // // If side == blas.Left, A is an m×k matrix and 0 <= k <= m. // If side == blas.Right, A is an n×k matrix and 0 <= k <= n. diff --git a/lapack/gonum/dormr2.go b/lapack/gonum/dormr2.go index 4bf0d879..59d4d4f1 100644 --- a/lapack/gonum/dormr2.go +++ b/lapack/gonum/dormr2.go @@ -8,10 +8,12 @@ import "gonum.org/v1/gonum/blas" // Dormr2 multiplies a general matrix C by an orthogonal matrix from a RQ factorization // determined by Dgerqf. 
-// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Qᵀ * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Qᵀ * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// // If side == blas.Left, a is a matrix of size k×m, and if side == blas.Right // a is of size k×n. // diff --git a/lapack/gonum/dpbcon.go b/lapack/gonum/dpbcon.go index f55a596c..0ed63e62 100644 --- a/lapack/gonum/dpbcon.go +++ b/lapack/gonum/dpbcon.go @@ -14,11 +14,14 @@ import ( // Dpbcon returns an estimate of the reciprocal of the condition number (in the // 1-norm) of an n×n symmetric positive definite band matrix using the Cholesky // factorization -// A = Uᵀ*U if uplo == blas.Upper -// A = L*Lᵀ if uplo == blas.Lower +// +// A = Uᵀ*U if uplo == blas.Upper +// A = L*Lᵀ if uplo == blas.Lower +// // computed by Dpbtrf. The estimate is obtained for norm(inv(A)), and the // reciprocal of the condition number is computed as -// rcond = 1 / (anorm * norm(inv(A))). +// +// rcond = 1 / (anorm * norm(inv(A))). // // The length of work must be at least 3*n and the length of iwork must be at // least n. diff --git a/lapack/gonum/dpbtf2.go b/lapack/gonum/dpbtf2.go index e54bc46f..8150e568 100644 --- a/lapack/gonum/dpbtf2.go +++ b/lapack/gonum/dpbtf2.go @@ -14,8 +14,10 @@ import ( // Dpbtf2 computes the Cholesky factorization of a symmetric positive banded // matrix ab. The matrix ab is n×n with kd diagonal bands. The Cholesky // factorization computed is -// A = Uᵀ * U if ul == blas.Upper -// A = L * Lᵀ if ul == blas.Lower +// +// A = Uᵀ * U if ul == blas.Upper +// A = L * Lᵀ if ul == blas.Lower +// // ul also specifies the storage of ab. If ul == blas.Upper, then // ab is stored as an upper-triangular banded matrix with kd super-diagonals, // and if ul == blas.Lower, ab is stored as a lower-triangular banded matrix @@ -27,21 +29,21 @@ import ( // The resulting Cholesky decomposition is stored in the same elements as the // input band matrix (a11 becomes u11 or l11, etc.). // -// ul = blas.Upper -// a11 a12 a13 -// a22 a23 a24 -// a33 a34 a35 -// a44 a45 a46 -// a55 a56 * -// a66 * * +// ul = blas.Upper +// a11 a12 a13 +// a22 a23 a24 +// a33 a34 a35 +// a44 a45 a46 +// a55 a56 * +// a66 * * // -// ul = blas.Lower -// * * a11 -// * a21 a22 -// a31 a32 a33 -// a42 a43 a44 -// a53 a54 a55 -// a64 a65 a66 +// ul = blas.Lower +// * * a11 +// * a21 a22 +// a31 a32 a33 +// a42 a43 a44 +// a53 a54 a55 +// a64 a65 a66 // // Dpbtf2 is the unblocked version of the algorithm, see Dpbtrf for the blocked // version. diff --git a/lapack/gonum/dpbtrf.go b/lapack/gonum/dpbtrf.go index d8814cdb..12cdfc0f 100644 --- a/lapack/gonum/dpbtrf.go +++ b/lapack/gonum/dpbtrf.go @@ -11,31 +11,33 @@ import ( // Dpbtrf computes the Cholesky factorization of an n×n symmetric positive // definite band matrix -// A = Uᵀ * U if uplo == blas.Upper -// A = L * Lᵀ if uplo == blas.Lower +// +// A = Uᵀ * U if uplo == blas.Upper +// A = L * Lᵀ if uplo == blas.Lower +// // where U is an upper triangular band matrix and L is lower triangular. kd is // the number of super- or sub-diagonals of A. // // The band storage scheme is illustrated below when n = 6 and kd = 2. Elements // marked * are not used by the function. 
// -// uplo == blas.Upper -// On entry: On return: -// a00 a01 a02 u00 u01 u02 -// a11 a12 a13 u11 u12 u13 -// a22 a23 a24 u22 u23 u24 -// a33 a34 a35 u33 u34 u35 -// a44 a45 * u44 u45 * -// a55 * * u55 * * +// uplo == blas.Upper +// On entry: On return: +// a00 a01 a02 u00 u01 u02 +// a11 a12 a13 u11 u12 u13 +// a22 a23 a24 u22 u23 u24 +// a33 a34 a35 u33 u34 u35 +// a44 a45 * u44 u45 * +// a55 * * u55 * * // -// uplo == blas.Lower -// On entry: On return: -// * * a00 * * l00 -// * a10 a11 * l10 l11 -// a20 a21 a22 l20 l21 l22 -// a31 a32 a33 l31 l32 l33 -// a42 a43 a44 l42 l43 l44 -// a53 a54 a55 l53 l54 l55 +// uplo == blas.Lower +// On entry: On return: +// * * a00 * * l00 +// * a10 a11 * l10 l11 +// a20 a21 a22 l20 l21 l22 +// a31 a32 a33 l31 l32 l33 +// a42 a43 a44 l42 l43 l44 +// a53 a54 a55 l53 l54 l55 func (impl Implementation) Dpbtrf(uplo blas.Uplo, n, kd int, ab []float64, ldab int) (ok bool) { const nbmax = 32 diff --git a/lapack/gonum/dpbtrs.go b/lapack/gonum/dpbtrs.go index f3d9559d..97c9ada0 100644 --- a/lapack/gonum/dpbtrs.go +++ b/lapack/gonum/dpbtrs.go @@ -11,8 +11,10 @@ import ( // Dpbtrs solves a system of linear equations A*X = B with an n×n symmetric // positive definite band matrix A using the Cholesky factorization -// A = Uᵀ * U if uplo == blas.Upper -// A = L * Lᵀ if uplo == blas.Lower +// +// A = Uᵀ * U if uplo == blas.Upper +// A = L * Lᵀ if uplo == blas.Lower +// // computed by Dpbtrf. kd is the number of super- or sub-diagonals of A. See the // documentation for Dpbtrf for a description of the band storage format of A. // diff --git a/lapack/gonum/dpotrs.go b/lapack/gonum/dpotrs.go index 8977013d..77d07000 100644 --- a/lapack/gonum/dpotrs.go +++ b/lapack/gonum/dpotrs.go @@ -12,8 +12,10 @@ import ( // Dpotrs solves a system of n linear equations A*X = B where A is an n×n // symmetric positive definite matrix and B is an n×nrhs matrix. The matrix A is // represented by its Cholesky factorization -// A = Uᵀ*U if uplo == blas.Upper -// A = L*Lᵀ if uplo == blas.Lower +// +// A = Uᵀ*U if uplo == blas.Upper +// A = L*Lᵀ if uplo == blas.Lower +// // as computed by Dpotrf. On entry, B contains the right-hand side matrix B, on // return it contains the solution matrix X. func (Implementation) Dpotrs(uplo blas.Uplo, n, nrhs int, a []float64, lda int, b []float64, ldb int) { diff --git a/lapack/gonum/dpstf2.go b/lapack/gonum/dpstf2.go index 134fa674..79b607dd 100644 --- a/lapack/gonum/dpstf2.go +++ b/lapack/gonum/dpstf2.go @@ -15,8 +15,10 @@ import ( // symmetric positive semidefinite matrix A. // // The factorization has the form -// Pᵀ * A * P = Uᵀ * U , if uplo = blas.Upper, -// Pᵀ * A * P = L * Lᵀ, if uplo = blas.Lower, +// +// Pᵀ * A * P = Uᵀ * U , if uplo = blas.Upper, +// Pᵀ * A * P = L * Lᵀ, if uplo = blas.Lower, +// // where U is an upper triangular matrix, L is lower triangular, and P is a // permutation matrix. // diff --git a/lapack/gonum/dpstrf.go b/lapack/gonum/dpstrf.go index 21ea49a3..46a2fd4b 100644 --- a/lapack/gonum/dpstrf.go +++ b/lapack/gonum/dpstrf.go @@ -15,8 +15,10 @@ import ( // symmetric positive semidefinite matrix A. // // The factorization has the form -// Pᵀ * A * P = Uᵀ * U , if uplo = blas.Upper, -// Pᵀ * A * P = L * Lᵀ, if uplo = blas.Lower, +// +// Pᵀ * A * P = Uᵀ * U , if uplo = blas.Upper, +// Pᵀ * A * P = L * Lᵀ, if uplo = blas.Lower, +// // where U is an upper triangular matrix, L is lower triangular, and P is a // permutation matrix. 
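To make the factorization and solve steps above concrete, here is a deliberately naive, unblocked sketch of A = L*Lᵀ followed by the forward and backward substitutions that Dpotrs describes for uplo == blas.Lower. It ignores band storage, pivoting, and blocking, and is not the library code; the 2×2 matrix is chosen only for illustration.

	package main

	import (
		"fmt"
		"math"
	)

	// cholLower overwrites the lower triangle of the n×n row-major matrix a
	// (stride n) with L such that A = L*Lᵀ. It returns false if a leading
	// minor is not positive, i.e. A is not positive definite.
	func cholLower(n int, a []float64) bool {
		for j := 0; j < n; j++ {
			d := a[j*n+j]
			for k := 0; k < j; k++ {
				d -= a[j*n+k] * a[j*n+k]
			}
			if d <= 0 {
				return false
			}
			a[j*n+j] = math.Sqrt(d)
			for i := j + 1; i < n; i++ {
				s := a[i*n+j]
				for k := 0; k < j; k++ {
					s -= a[i*n+k] * a[j*n+k]
				}
				a[i*n+j] = s / a[j*n+j]
			}
		}
		return true
	}

	// solve overwrites b with the solution of L*Lᵀ*x = b using a forward and
	// then a backward substitution, the two triangular solves Dpotrs performs.
	func solve(n int, l, b []float64) {
		for i := 0; i < n; i++ { // L*y = b
			for k := 0; k < i; k++ {
				b[i] -= l[i*n+k] * b[k]
			}
			b[i] /= l[i*n+i]
		}
		for i := n - 1; i >= 0; i-- { // Lᵀ*x = y
			for k := i + 1; k < n; k++ {
				b[i] -= l[k*n+i] * b[k]
			}
			b[i] /= l[i*n+i]
		}
	}

	func main() {
		a := []float64{4, 2, 2, 3} // 2×2 symmetric positive definite
		b := []float64{2, 3}
		if cholLower(2, a) {
			solve(2, a, b)
			fmt.Println(b) // x such that A*x = b; here (0, 1)
		}
	}
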
// diff --git a/lapack/gonum/dsytd2.go b/lapack/gonum/dsytd2.go index 8658f4e5..03e7cc07 100644 --- a/lapack/gonum/dsytd2.go +++ b/lapack/gonum/dsytd2.go @@ -11,7 +11,9 @@ import ( // Dsytd2 reduces a symmetric n×n matrix A to symmetric tridiagonal form T by // an orthogonal similarity transformation -// Qᵀ * A * Q = T +// +// Qᵀ * A * Q = T +// // On entry, the matrix is contained in the specified triangle of a. On exit, // if uplo == blas.Upper, the diagonal and first super-diagonal of a are // overwritten with the elements of T. The elements above the first super-diagonal @@ -24,28 +26,37 @@ import ( // // Q is represented as a product of elementary reflectors. // If uplo == blas.Upper -// Q = H_{n-2} * ... * H_1 * H_0 +// +// Q = H_{n-2} * ... * H_1 * H_0 +// // and if uplo == blas.Lower -// Q = H_0 * H_1 * ... * H_{n-2} +// +// Q = H_0 * H_1 * ... * H_{n-2} +// // where -// H_i = I - tau * v * vᵀ +// +// H_i = I - tau * v * vᵀ +// // where tau is stored in tau[i], and v is stored in a. // // If uplo == blas.Upper, v[0:i-1] is stored in A[0:i-1,i+1], v[i] = 1, and // v[i+1:] = 0. The elements of a are -// [ d e v2 v3 v4] -// [ d e v3 v4] -// [ d e v4] -// [ d e] -// [ d] +// +// [ d e v2 v3 v4] +// [ d e v3 v4] +// [ d e v4] +// [ d e] +// [ d] +// // If uplo == blas.Lower, v[0:i+1] = 0, v[i+1] = 1, and v[i+2:] is stored in // A[i+2:n,i]. // The elements of a are -// [ d ] -// [ e d ] -// [v1 e d ] -// [v1 v2 e d ] -// [v1 v2 v3 e d] +// +// [ d ] +// [ e d ] +// [v1 e d ] +// [v1 v2 e d ] +// [v1 v2 v3 e d] // // Dsytd2 is an internal routine. It is exported for testing purposes. func (impl Implementation) Dsytd2(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau []float64) { diff --git a/lapack/gonum/dsytrd.go b/lapack/gonum/dsytrd.go index 262a56c9..74d2287e 100644 --- a/lapack/gonum/dsytrd.go +++ b/lapack/gonum/dsytrd.go @@ -11,7 +11,9 @@ import ( // Dsytrd reduces a symmetric n×n matrix A to symmetric tridiagonal form by an // orthogonal similarity transformation -// Qᵀ * A * Q = T +// +// Qᵀ * A * Q = T +// // where Q is an orthonormal matrix and T is symmetric and tridiagonal. // // On entry, a contains the elements of the input matrix in the triangle specified @@ -21,28 +23,38 @@ import ( // the product of elementary reflectors. // // If uplo == blas.Upper, Q is constructed with -// Q = H_{n-2} * ... * H_1 * H_0 +// +// Q = H_{n-2} * ... * H_1 * H_0 +// // where -// H_i = I - tau_i * v * vᵀ +// +// H_i = I - tau_i * v * vᵀ +// // v is constructed as v[i+1:n] = 0, v[i] = 1, v[0:i-1] is stored in A[0:i-1, i+1]. // The elements of A are -// [ d e v1 v2 v3] -// [ d e v2 v3] -// [ d e v3] -// [ d e] -// [ e] +// +// [ d e v1 v2 v3] +// [ d e v2 v3] +// [ d e v3] +// [ d e] +// [ e] // // If uplo == blas.Lower, Q is constructed with -// Q = H_0 * H_1 * ... * H_{n-2} +// +// Q = H_0 * H_1 * ... * H_{n-2} +// // where -// H_i = I - tau_i * v * vᵀ +// +// H_i = I - tau_i * v * vᵀ +// // v is constructed as v[0:i+1] = 0, v[i+1] = 1, v[i+2:n] is stored in A[i+2:n, i]. // The elements of A are -// [ d ] -// [ e d ] -// [v0 e d ] -// [v0 v1 e d ] -// [v0 v1 v2 e d] +// +// [ d ] +// [ e d ] +// [v0 e d ] +// [v0 v1 e d ] +// [v0 v1 v2 e d] // // d must have length n, and e and tau must have length n-1. Dsytrd will panic if // these conditions are not met. 
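The elementary reflector form H = I - tau * v * vᵀ used by Dsytd2, Dsytrd, and the other reflector-based routines above can be applied without ever forming H. Below is a minimal standalone sketch of the left application C = H*C, written naively rather than taken from the library; the values in main are chosen only so the result is easy to check by hand.

	package main

	import "fmt"

	// applyReflectorLeft overwrites the m×n row-major matrix c (stride n) with
	// H*C where H = I - tau*v*vᵀ, using C -= tau * v * (vᵀ*C) column by column.
	func applyReflectorLeft(m, n int, tau float64, v, c []float64) {
		for j := 0; j < n; j++ {
			var w float64 // w = vᵀ * C[:,j]
			for i := 0; i < m; i++ {
				w += v[i] * c[i*n+j]
			}
			for i := 0; i < m; i++ {
				c[i*n+j] -= tau * w * v[i]
			}
		}
	}

	func main() {
		// With tau = 2/(vᵀ*v) the matrix H is an orthogonal Householder
		// reflection; this particular v maps (3, 4)ᵀ to (-5, 0)ᵀ.
		v := []float64{1, 0.5}
		tau := 1.6
		c := []float64{
			3, 1,
			4, 2,
		}
		applyReflectorLeft(2, 2, tau, v, c)
		fmt.Println(c) // first column becomes (-5, 0)
	}
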
diff --git a/lapack/gonum/dtbtrs.go b/lapack/gonum/dtbtrs.go index 11e128b8..6b56d9e0 100644 --- a/lapack/gonum/dtbtrs.go +++ b/lapack/gonum/dtbtrs.go @@ -10,8 +10,10 @@ import ( ) // Dtbtrs solves a triangular system of the form -// A * X = B if trans == blas.NoTrans -// Aᵀ * X = B if trans == blas.Trans or blas.ConjTrans +// +// A * X = B if trans == blas.NoTrans +// Aᵀ * X = B if trans == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular band matrix with kd super- or subdiagonals, and // B is an n×nrhs matrix. // diff --git a/lapack/gonum/dtgsja.go b/lapack/gonum/dtgsja.go index 5bc0ac1a..b3f0208a 100644 --- a/lapack/gonum/dtgsja.go +++ b/lapack/gonum/dtgsja.go @@ -19,18 +19,18 @@ import ( // preprocessing subroutine Dggsvp from a general m×n matrix A and p×n // matrix B: // -// n-k-l k l -// A = k [ 0 A12 A13 ] if m-k-l >= 0; -// l [ 0 0 A23 ] -// m-k-l [ 0 0 0 ] +// n-k-l k l +// A = k [ 0 A12 A13 ] if m-k-l >= 0; +// l [ 0 0 A23 ] +// m-k-l [ 0 0 0 ] // -// n-k-l k l -// A = k [ 0 A12 A13 ] if m-k-l < 0; -// m-k [ 0 0 A23 ] +// n-k-l k l +// A = k [ 0 A12 A13 ] if m-k-l < 0; +// m-k [ 0 0 A23 ] // -// n-k-l k l -// B = l [ 0 0 B13 ] -// p-l [ 0 0 0 ] +// n-k-l k l +// B = l [ 0 0 B13 ] +// p-l [ 0 0 0 ] // // where the k×k matrix A12 and l×l matrix B13 are non-singular // upper triangular. A23 is l×l upper triangular if m-k-l >= 0, @@ -38,7 +38,7 @@ import ( // // On exit, // -// Uᵀ*A*Q = D1*[ 0 R ], Vᵀ*B*Q = D2*[ 0 R ], +// Uᵀ*A*Q = D1*[ 0 R ], Vᵀ*B*Q = D2*[ 0 R ], // // where U, V and Q are orthogonal matrices. // R is a non-singular upper triangular matrix, and D1 and D2 are @@ -46,54 +46,59 @@ import ( // // If m-k-l >= 0, // -// k l -// D1 = k [ I 0 ] -// l [ 0 C ] -// m-k-l [ 0 0 ] +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] // -// k l -// D2 = l [ 0 S ] -// p-l [ 0 0 ] +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] // -// n-k-l k l -// [ 0 R ] = k [ 0 R11 R12 ] k -// l [ 0 0 R22 ] l +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l // // where // -// C = diag( alpha_k, ... , alpha_{k+l} ), -// S = diag( beta_k, ... , beta_{k+l} ), -// C^2 + S^2 = I. +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. // // R is stored in -// A[0:k+l, n-k-l:n] +// +// A[0:k+l, n-k-l:n] +// // on exit. // // If m-k-l < 0, // -// k m-k k+l-m -// D1 = k [ I 0 0 ] -// m-k [ 0 C 0 ] +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] // -// k m-k k+l-m -// D2 = m-k [ 0 S 0 ] -// k+l-m [ 0 0 I ] -// p-l [ 0 0 0 ] +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] // -// n-k-l k m-k k+l-m -// [ 0 R ] = k [ 0 R11 R12 R13 ] -// m-k [ 0 0 R22 R23 ] -// k+l-m [ 0 0 0 R33 ] +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] // // where -// C = diag( alpha_k, ... , alpha_m ), -// S = diag( beta_k, ... , beta_m ), -// C^2 + S^2 = I. // -// R = [ R11 R12 R13 ] is stored in A[0:m, n-k-l:n] -// [ 0 R22 R23 ] +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[0:m, n-k-l:n] +// [ 0 R22 R23 ] +// // and R33 is stored in -// B[m-k:l, n+m-k-l:n] on exit. +// +// B[m-k:l, n+m-k-l:n] on exit. // // The computation of the orthogonal transformation matrices U, V or Q // is optional. 
These matrices may either be formed explicitly, or they @@ -103,51 +108,64 @@ import ( // min(l,m-k)×l triangular or trapezoidal matrix A23 and l×l // matrix B13 to the form: // -// U1ᵀ*A13*Q1 = C1*R1; V1ᵀ*B13*Q1 = S1*R1, +// U1ᵀ*A13*Q1 = C1*R1; V1ᵀ*B13*Q1 = S1*R1, // // where U1, V1 and Q1 are orthogonal matrices. C1 and S1 are diagonal // matrices satisfying // -// C1^2 + S1^2 = I, +// C1^2 + S1^2 = I, // // and R1 is an l×l non-singular upper triangular matrix. // // jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior // is as follows -// jobU == lapack.GSVDU Compute orthogonal matrix U -// jobU == lapack.GSVDUnit Use unit-initialized matrix -// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDUnit Use unit-initialized matrix +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// // The behavior is the same for jobV and jobQ with the exception that instead of // lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. // The matrices U, V and Q must be m×m, p×p and n×n respectively unless the // relevant job parameter is lapack.GSVDNone. // // k and l specify the sub-blocks in the input matrices A and B: -// A23 = A[k:min(k+l,m), n-l:n) and B13 = B[0:l, n-l:n] +// +// A23 = A[k:min(k+l,m), n-l:n) and B13 = B[0:l, n-l:n] +// // of A and B, whose GSVD is going to be computed by Dtgsja. // // tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz // iteration procedure. Generally, they are the same as used in the preprocessing // step, for example, -// tola = max(m, n)*norm(A)*eps, -// tolb = max(p, n)*norm(B)*eps, +// +// tola = max(m, n)*norm(A)*eps, +// tolb = max(p, n)*norm(B)*eps, +// // where eps is the machine epsilon. // // work must have length at least 2*n, otherwise Dtgsja will panic. // // alpha and beta must have length n or Dtgsja will panic. On exit, alpha and // beta contain the generalized singular value pairs of A and B -// alpha[0:k] = 1, -// beta[0:k] = 0, +// +// alpha[0:k] = 1, +// beta[0:k] = 0, +// // if m-k-l >= 0, -// alpha[k:k+l] = diag(C), -// beta[k:k+l] = diag(S), +// +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// // if m-k-l < 0, -// alpha[k:m]= C, alpha[m:k+l]= 0 -// beta[k:m] = S, beta[m:k+l] = 1. +// +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// // if k+l < n, -// alpha[k+l:n] = 0 and -// beta[k+l:n] = 0. +// +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. // // On exit, A[n-k:n, 0:min(k+l,m)] contains the triangular matrix R or part of R // and if necessary, B[m-k:l, n+m-k-l:n] contains a part of R. diff --git a/lapack/gonum/dtrevc3.go b/lapack/gonum/dtrevc3.go index c6568665..86197d3a 100644 --- a/lapack/gonum/dtrevc3.go +++ b/lapack/gonum/dtrevc3.go @@ -15,14 +15,19 @@ import ( // Dtrevc3 computes some or all of the right and/or left eigenvectors of an n×n // upper quasi-triangular matrix T in Schur canonical form. Matrices of this // type are produced by the Schur factorization of a real general matrix A -// A = Q T Qᵀ, +// +// A = Q T Qᵀ, +// // as computed by Dhseqr. // // The right eigenvector x of T corresponding to an // eigenvalue λ is defined by -// T x = λ x, +// +// T x = λ x, +// // and the left eigenvector y is defined by -// yᵀ T = λ yᵀ. +// +// yᵀ T = λ yᵀ. // // The eigenvalues are read directly from the diagonal blocks of T. 
// @@ -68,22 +73,26 @@ import ( // // On return, if side is lapack.EVLeft or lapack.EVBoth, // VL will contain: -// if howmny == lapack.EVAll, the matrix Y of left eigenvectors of T, -// if howmny == lapack.EVAllMulQ, the matrix Q*Y, -// if howmny == lapack.EVSelected, the left eigenvectors of T specified by -// selected, stored consecutively in the -// columns of VL, in the same order as their -// eigenvalues. +// +// if howmny == lapack.EVAll, the matrix Y of left eigenvectors of T, +// if howmny == lapack.EVAllMulQ, the matrix Q*Y, +// if howmny == lapack.EVSelected, the left eigenvectors of T specified by +// selected, stored consecutively in the +// columns of VL, in the same order as their +// eigenvalues. +// // VL is not referenced if side == lapack.EVRight. // // On return, if side is lapack.EVRight or lapack.EVBoth, // VR will contain: -// if howmny == lapack.EVAll, the matrix X of right eigenvectors of T, -// if howmny == lapack.EVAllMulQ, the matrix Q*X, -// if howmny == lapack.EVSelected, the left eigenvectors of T specified by -// selected, stored consecutively in the -// columns of VR, in the same order as their -// eigenvalues. +// +// if howmny == lapack.EVAll, the matrix X of right eigenvectors of T, +// if howmny == lapack.EVAllMulQ, the matrix Q*X, +// if howmny == lapack.EVSelected, the left eigenvectors of T specified by +// selected, stored consecutively in the +// columns of VR, in the same order as their +// eigenvalues. +// // VR is not referenced if side == lapack.EVLeft. // // Complex eigenvectors corresponding to a complex eigenvalue are stored in VL diff --git a/lapack/gonum/dtrexc.go b/lapack/gonum/dtrexc.go index c577ff9b..2a0a5e7c 100644 --- a/lapack/gonum/dtrexc.go +++ b/lapack/gonum/dtrexc.go @@ -7,7 +7,9 @@ package gonum import "gonum.org/v1/gonum/lapack" // Dtrexc reorders the real Schur factorization of a n×n real matrix -// A = Q*T*Qᵀ +// +// A = Q*T*Qᵀ +// // so that the diagonal block of T with row index ifst is moved to row ilst. // // On entry, T must be in Schur canonical form, that is, block upper triangular @@ -34,7 +36,9 @@ import "gonum.org/v1/gonum/lapack" // is true, ilstOut may differ from ilst by +1 or -1. // // It must hold that -// 0 <= ifst < n, and 0 <= ilst < n, +// +// 0 <= ifst < n, and 0 <= ilst < n, +// // otherwise Dtrexc will panic. // // If ok is false, two adjacent blocks were too close to swap because the diff --git a/lapack/gonum/ilaenv.go b/lapack/gonum/ilaenv.go index a70bbee2..c79597e3 100644 --- a/lapack/gonum/ilaenv.go +++ b/lapack/gonum/ilaenv.go @@ -6,20 +6,21 @@ package gonum // Ilaenv returns algorithm tuning parameters for the algorithm given by the // input string. ispec specifies the parameter to return: -// 1: The optimal block size for a blocked algorithm. -// 2: The minimum block size for a blocked algorithm. -// 3: The block size of unprocessed data at which a blocked algorithm should -// crossover to an unblocked version. -// 4: The number of shifts. -// 5: The minimum column dimension for blocking to be used. -// 6: The crossover point for SVD (to use QR factorization or not). -// 7: The number of processors. -// 8: The crossover point for multi-shift in QR and QZ methods for non-symmetric eigenvalue problems. -// 9: Maximum size of the subproblems in divide-and-conquer algorithms. -// 10: ieee infinity and NaN arithmetic can be trusted not to trap. -// 11: ieee infinity arithmetic can be trusted not to trap. -// 12...16: parameters for Dhseqr and related functions. See Iparmq for more -// information. 
+// +// 1: The optimal block size for a blocked algorithm. +// 2: The minimum block size for a blocked algorithm. +// 3: The block size of unprocessed data at which a blocked algorithm should +// crossover to an unblocked version. +// 4: The number of shifts. +// 5: The minimum column dimension for blocking to be used. +// 6: The crossover point for SVD (to use QR factorization or not). +// 7: The number of processors. +// 8: The crossover point for multi-shift in QR and QZ methods for non-symmetric eigenvalue problems. +// 9: Maximum size of the subproblems in divide-and-conquer algorithms. +// 10: ieee infinity and NaN arithmetic can be trusted not to trap. +// 11: ieee infinity arithmetic can be trusted not to trap. +// 12...16: parameters for Dhseqr and related functions. See Iparmq for more +// information. // // Ilaenv is an internal routine. It is exported for testing purposes. func (impl Implementation) Ilaenv(ispec int, name string, opts string, n1, n2, n3, n4 int) int { diff --git a/lapack/gonum/iparmq.go b/lapack/gonum/iparmq.go index 3800f11c..65d10524 100644 --- a/lapack/gonum/iparmq.go +++ b/lapack/gonum/iparmq.go @@ -10,11 +10,13 @@ import "math" // related subroutines for eigenvalue problems. // // ispec specifies the parameter to return: -// 12: Crossover point between Dlahqr and Dlaqr0. Will be at least 11. -// 13: Deflation window size. -// 14: Nibble crossover point. Determines when to skip a multi-shift QR sweep. -// 15: Number of simultaneous shifts in a multishift QR iteration. -// 16: Select structured matrix multiply. +// +// 12: Crossover point between Dlahqr and Dlaqr0. Will be at least 11. +// 13: Deflation window size. +// 14: Nibble crossover point. Determines when to skip a multi-shift QR sweep. +// 15: Number of simultaneous shifts in a multishift QR iteration. +// 16: Select structured matrix multiply. +// // For other values of ispec Iparmq will panic. // // name is the name of the calling function. name must be in uppercase but this diff --git a/lapack/lapack64/lapack64.go b/lapack/lapack64/lapack64.go index 12de74b2..acb62da4 100644 --- a/lapack/lapack64/lapack64.go +++ b/lapack/lapack64/lapack64.go @@ -36,8 +36,10 @@ func max(a, b int) int { // Potrf computes the Cholesky factorization of a. // The factorization has the form -// A = Uᵀ * U if a.Uplo == blas.Upper, or -// A = L * Lᵀ if a.Uplo == blas.Lower, +// +// A = Uᵀ * U if a.Uplo == blas.Upper, or +// A = L * Lᵀ if a.Uplo == blas.Lower, +// // where U is an upper triangular matrix and L is lower triangular. // The triangular matrix is returned in t, and the underlying data between // a and t is shared. The returned bool indicates whether a is positive @@ -84,11 +86,14 @@ func Potrs(t blas64.Triangular, b blas64.General) { // Pbcon returns an estimate of the reciprocal of the condition number (in the // 1-norm) of an n×n symmetric positive definite band matrix using the Cholesky // factorization -// A = Uᵀ*U if uplo == blas.Upper -// A = L*Lᵀ if uplo == blas.Lower +// +// A = Uᵀ*U if uplo == blas.Upper +// A = L*Lᵀ if uplo == blas.Lower +// // computed by Pbtrf. The estimate is obtained for norm(inv(A)), and the // reciprocal of the condition number is computed as -// rcond = 1 / (anorm * norm(inv(A))). +// +// rcond = 1 / (anorm * norm(inv(A))). // // The length of work must be at least 3*n and the length of iwork must be at // least n. 
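The meaning of rcond above can be checked directly on a matrix small enough that its inverse is known exactly; the routines, of course, estimate norm(inv(A)) from the factorization without forming the inverse. A standalone sketch, with an arbitrary 2×2 example matrix:

	package main

	import (
		"fmt"
		"math"
	)

	// oneNorm returns the maximum absolute column sum of an n×n row-major matrix.
	func oneNorm(n int, a []float64) float64 {
		var max float64
		for j := 0; j < n; j++ {
			var s float64
			for i := 0; i < n; i++ {
				s += math.Abs(a[i*n+j])
			}
			if s > max {
				max = s
			}
		}
		return max
	}

	func main() {
		// A symmetric positive definite 2×2 matrix and its exact inverse.
		a := []float64{4, 2, 2, 3}
		det := 4.0*3.0 - 2.0*2.0
		inv := []float64{3 / det, -2 / det, -2 / det, 4 / det}

		anorm := oneNorm(2, a)
		rcond := 1 / (anorm * oneNorm(2, inv))
		fmt.Println(anorm, rcond) // 6 and 1/4.5 ≈ 0.222
	}
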
@@ -98,8 +103,10 @@ func Pbcon(a blas64.SymmetricBand, anorm float64, work []float64, iwork []int) f // Pbtrf computes the Cholesky factorization of an n×n symmetric positive // definite band matrix -// A = Uᵀ * U if a.Uplo == blas.Upper -// A = L * Lᵀ if a.Uplo == blas.Lower +// +// A = Uᵀ * U if a.Uplo == blas.Upper +// A = L * Lᵀ if a.Uplo == blas.Lower +// // where U and L are upper, respectively lower, triangular band matrices. // // The triangular matrix U or L is returned in t, and the underlying data @@ -118,8 +125,10 @@ func Pbtrf(a blas64.SymmetricBand) (t blas64.TriangularBand, ok bool) { // Pbtrs solves a system of linear equations A*X = B with an n×n symmetric // positive definite band matrix A using the Cholesky factorization -// A = Uᵀ * U if t.Uplo == blas.Upper -// A = L * Lᵀ if t.Uplo == blas.Lower +// +// A = Uᵀ * U if t.Uplo == blas.Upper +// A = L * Lᵀ if t.Uplo == blas.Lower +// // t contains the corresponding triangular factor as returned by Pbtrf. // // On entry, b contains the right hand side matrix B. On return, it is @@ -132,8 +141,10 @@ func Pbtrs(t blas64.TriangularBand, b blas64.General) { // symmetric positive semidefinite matrix A. // // The factorization has the form -// Pᵀ * A * P = Uᵀ * U , if a.Uplo = blas.Upper, -// Pᵀ * A * P = L * Lᵀ, if a.Uplo = blas.Lower, +// +// Pᵀ * A * P = Uᵀ * U , if a.Uplo = blas.Upper, +// Pᵀ * A * P = L * Lᵀ, if a.Uplo = blas.Lower, +// // where U is an upper triangular matrix, L is lower triangular, and P is a // permutation matrix. // @@ -191,6 +202,7 @@ func Gecon(norm lapack.MatrixNorm, a blas64.General, anorm float64, work []float // Aᵀ * X = B. // 4. If m < n and trans == blas.Trans, Gels finds X such that || A*X - B||_2 // is minimized. +// // Note that the least-squares solutions (cases 1 and 3) perform the minimization // per column of B. This is not the same as finding the minimum-norm matrix. // @@ -219,9 +231,11 @@ func Gels(trans blas.Transpose, a blas64.General, b blas64.General, work []float // // The ith elementary reflector can be explicitly constructed by first extracting // the -// v[j] = 0 j < i -// v[j] = 1 j == i -// v[j] = a[j*lda+i] j > i +// +// v[j] = 0 j < i +// v[j] = 1 j == i +// v[j] = a[j*lda+i] j > i +// // and computing H_i = I - tau[i] * v * vᵀ. // // The orthonormal matrix Q can be constucted from a product of these elementary @@ -259,7 +273,9 @@ func Gelqf(a blas64.General, tau, work []float64, lwork int) { // Gesvd computes the singular value decomposition of the input matrix A. // // The singular value decomposition is -// A = U * Sigma * Vᵀ +// +// A = U * Sigma * Vᵀ +// // where Sigma is an m×n diagonal matrix containing the singular values of A, // U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first // min(m,n) columns of U and V are the left and right singular vectors of A @@ -267,10 +283,12 @@ func Gelqf(a blas64.General, tau, work []float64, lwork int) { // // jobU and jobVT are options for computing the singular vectors. The behavior // is as follows -// jobU == lapack.SVDAll All m columns of U are returned in u -// jobU == lapack.SVDStore The first min(m,n) columns are returned in u -// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a -// jobU == lapack.SVDNone The columns of U are not computed. 
+// +// jobU == lapack.SVDAll All m columns of U are returned in u +// jobU == lapack.SVDStore The first min(m,n) columns are returned in u +// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a +// jobU == lapack.SVDNone The columns of U are not computed. +// // The behavior is the same for jobVT and the rows of Vᵀ. At most one of jobU // and jobVT can equal lapack.SVDOverwrite, and Gesvd will panic otherwise. // @@ -304,7 +322,9 @@ func Gesvd(jobU, jobVT lapack.SVDJob, a, u, vt blas64.General, s, work []float64 // Getrf computes the LU decomposition of the m×n matrix A. // The LU decomposition is a factorization of A into -// A = P * L * U +// +// A = P * L * U +// // where P is a permutation matrix, L is a unit lower triangular matrix, and // U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored // in place into a. @@ -341,8 +361,10 @@ func Getri(a blas64.General, ipiv []int, work []float64, lwork int) (ok bool) { // Getrs solves a system of equations using an LU factorization. // The system of equations solved is -// A * X = B if trans == blas.Trans -// Aᵀ * X = B if trans == blas.NoTrans +// +// A * X = B if trans == blas.Trans +// Aᵀ * X = B if trans == blas.NoTrans +// // A is a general n×n matrix with stride lda. B is a general matrix of size n×nrhs. // // On entry b contains the elements of the matrix B. On exit, b contains the @@ -356,9 +378,11 @@ func Getrs(trans blas.Transpose, a blas64.General, b blas64.General, ipiv []int) // Ggsvd3 computes the generalized singular value decomposition (GSVD) // of an m×n matrix A and p×n matrix B: -// Uᵀ*A*Q = D1*[ 0 R ] // -// Vᵀ*B*Q = D2*[ 0 R ] +// Uᵀ*A*Q = D1*[ 0 R ] +// +// Vᵀ*B*Q = D2*[ 0 R ] +// // where U, V and Q are orthogonal matrices. // // Ggsvd3 returns k and l, the dimensions of the sub-blocks. k+l @@ -369,62 +393,69 @@ func Getrs(trans blas.Transpose, a blas64.General, b blas64.General, ipiv []int) // // If m-k-l >= 0, // -// k l -// D1 = k [ I 0 ] -// l [ 0 C ] -// m-k-l [ 0 0 ] +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] // -// k l -// D2 = l [ 0 S ] -// p-l [ 0 0 ] +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] // -// n-k-l k l -// [ 0 R ] = k [ 0 R11 R12 ] k -// l [ 0 0 R22 ] l +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l // // where // -// C = diag( alpha_k, ... , alpha_{k+l} ), -// S = diag( beta_k, ... , beta_{k+l} ), -// C^2 + S^2 = I. +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. // // R is stored in -// A[0:k+l, n-k-l:n] +// +// A[0:k+l, n-k-l:n] +// // on exit. // // If m-k-l < 0, // -// k m-k k+l-m -// D1 = k [ I 0 0 ] -// m-k [ 0 C 0 ] +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] // -// k m-k k+l-m -// D2 = m-k [ 0 S 0 ] -// k+l-m [ 0 0 I ] -// p-l [ 0 0 0 ] +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] // -// n-k-l k m-k k+l-m -// [ 0 R ] = k [ 0 R11 R12 R13 ] -// m-k [ 0 0 R22 R23 ] -// k+l-m [ 0 0 0 R33 ] +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] // // where -// C = diag( alpha_k, ... , alpha_m ), -// S = diag( beta_k, ... , beta_m ), -// C^2 + S^2 = I. // -// R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n] -// [ 0 R22 R23 ] +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n] +// [ 0 R22 R23 ] +// // and R33 is stored in -// B[m-k:l, n+m-k-l:n] on exit. 
+// +// B[m-k:l, n+m-k-l:n] on exit. // // Ggsvd3 computes C, S, R, and optionally the orthogonal transformation // matrices U, V and Q. // // jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior // is as follows -// jobU == lapack.GSVDU Compute orthogonal matrix U -// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// // The behavior is the same for jobV and jobQ with the exception that instead of // lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. // The matrices U, V and Q must be m×m, p×p and n×n respectively unless the @@ -432,17 +463,24 @@ func Getrs(trans blas.Transpose, a blas64.General, b blas64.General, ipiv []int) // // alpha and beta must have length n or Ggsvd3 will panic. On exit, alpha and // beta contain the generalized singular value pairs of A and B -// alpha[0:k] = 1, -// beta[0:k] = 0, +// +// alpha[0:k] = 1, +// beta[0:k] = 0, +// // if m-k-l >= 0, -// alpha[k:k+l] = diag(C), -// beta[k:k+l] = diag(S), +// +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// // if m-k-l < 0, -// alpha[k:m]= C, alpha[m:k+l]= 0 -// beta[k:m] = S, beta[m:k+l] = 1. +// +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// // if k+l < n, -// alpha[k+l:n] = 0 and -// beta[k+l:n] = 0. +// +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. // // On exit, iwork contains the permutation required to sort alpha descending. // @@ -455,8 +493,10 @@ func Ggsvd3(jobU, jobV, jobQ lapack.GSVDJob, a, b blas64.General, alpha, beta [] } // Gtsv solves one of the equations -// A * X = B if trans == blas.NoTrans -// Aᵀ * X = B if trans == blas.Trans or blas.ConjTrans +// +// A * X = B if trans == blas.NoTrans +// Aᵀ * X = B if trans == blas.Trans or blas.ConjTrans +// // where A is an n×n tridiagonal matrix. It uses Gaussian elimination with // partial pivoting. // @@ -477,8 +517,10 @@ func Gtsv(trans blas.Transpose, a Tridiagonal, b blas64.General) (ok bool) { } // Lagtm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C if trans == blas.NoTrans -// C = alpha * Aᵀ * B + beta * C if trans == blas.Trans or blas.ConjTrans +// +// C = alpha * A * B + beta * C if trans == blas.NoTrans +// C = alpha * Aᵀ * B + beta * C if trans == blas.Trans or blas.ConjTrans +// // where A is an m×m tridiagonal matrix represented by its diagonals dl, d, du, // B and C are m×n dense matrices, and alpha and beta are scalars. // @@ -490,10 +532,12 @@ func Lagtm(trans blas.Transpose, alpha float64, a Tridiagonal, b blas64.General, // Lange computes the matrix norm of the general m×n matrix A. The input norm // specifies the norm computed. -// lapack.MaxAbs: the maximum absolute value of an element. -// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. -// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. -// lapack.Frobenius: the square root of the sum of the squares of the entries. +// +// lapack.MaxAbs: the maximum absolute value of an element. +// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. +// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. +// lapack.Frobenius: the square root of the sum of the squares of the entries. +// // If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise. 
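To make the norm options above concrete, a short sketch using the Lange signature shown in the surrounding context; the 2×3 data is illustrative only.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/blas/blas64"
		"gonum.org/v1/gonum/lapack"
		"gonum.org/v1/gonum/lapack/lapack64"
	)

	func main() {
		a := blas64.General{
			Rows:   2,
			Cols:   3,
			Stride: 3,
			Data:   []float64{1, -2, 3, -4, 5, -6},
		}
		work := make([]float64, a.Cols) // required only for lapack.MaxColumnSum
		fmt.Println(lapack64.Lange(lapack.MaxColumnSum, a, work)) // 9
		fmt.Println(lapack64.Lange(lapack.Frobenius, a, nil))     // sqrt(91) ≈ 9.54
	}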
// There are no restrictions on work for the other matrix norms. func Lange(norm lapack.MatrixNorm, a blas64.General, work []float64) float64 { @@ -555,9 +599,12 @@ func Lantb(norm lapack.MatrixNorm, a blas64.TriangularBand, work []float64) floa // k[0],k[1],...,k[m-1] of the integers 0,...,m-1. // // If forward is true, a forward permutation is applied: -// X[k[i],0:n] is moved to X[i,0:n] for i=0,1,...,m-1. +// +// X[k[i],0:n] is moved to X[i,0:n] for i=0,1,...,m-1. +// // If forward is false, a backward permutation is applied: -// X[i,0:n] is moved to X[k[i],0:n] for i=0,1,...,m-1. +// +// X[i,0:n] is moved to X[k[i],0:n] for i=0,1,...,m-1. // // k must have length m, otherwise Lapmr will panic. func Lapmr(forward bool, x blas64.General, k []int) { @@ -569,11 +616,11 @@ func Lapmr(forward bool, x blas64.General, k []int) { // // If forward is true a forward permutation is performed: // -// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. +// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. // // otherwise a backward permutation is performed: // -// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. +// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. // // k must have length n, otherwise Lapmt will panic. k is zero-indexed. func Lapmt(forward bool, x blas64.General, k []int) { @@ -582,10 +629,12 @@ func Lapmt(forward bool, x blas64.General, k []int) { // Ormlq multiplies the matrix C by the othogonal matrix Q defined by // A and tau. A and tau are as returned from Gelqf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Qᵀ * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Qᵀ * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Qᵀ if side == blas.Right and trans == blas.Trans +// // If side == blas.Left, A is a matrix of side k×m, and if side == blas.Right // A is of size k×n. This uses a blocked algorithm. // @@ -603,12 +652,15 @@ func Ormlq(side blas.Side, trans blas.Transpose, a blas64.General, tau []float64 } // Ormqr multiplies an m×n matrix C by an orthogonal matrix Q as -// C = Q * C if side == blas.Left and trans == blas.NoTrans, -// C = Qᵀ * C if side == blas.Left and trans == blas.Trans, -// C = C * Q if side == blas.Right and trans == blas.NoTrans, -// C = C * Qᵀ if side == blas.Right and trans == blas.Trans, +// +// C = Q * C if side == blas.Left and trans == blas.NoTrans, +// C = Qᵀ * C if side == blas.Left and trans == blas.Trans, +// C = C * Q if side == blas.Right and trans == blas.NoTrans, +// C = C * Qᵀ if side == blas.Right and trans == blas.Trans, +// // where Q is defined as the product of k elementary reflectors -// Q = H_0 * H_1 * ... * H_{k-1}. +// +// Q = H_0 * H_1 * ... * H_{k-1}. // // If side == blas.Left, A is an m×k matrix and 0 <= k <= m. // If side == blas.Right, A is an n×k matrix and 0 <= k <= n. @@ -666,8 +718,10 @@ func Syev(jobz lapack.EVJob, a blas64.Symmetric, w, work []float64, lwork int) ( } // Tbtrs solves a triangular system of the form -// A * X = B if trans == blas.NoTrans -// Aᵀ * X = B if trans == blas.Trans or blas.ConjTrans +// +// A * X = B if trans == blas.NoTrans +// Aᵀ * X = B if trans == blas.Trans or blas.ConjTrans +// // where A is an n×n triangular band matrix, and B is an n×nrhs matrix. 
// // Tbtrs returns whether A is non-singular. If A is singular, no solutions X @@ -706,22 +760,30 @@ func Trtrs(trans blas.Transpose, a blas64.Triangular, b blas64.General) (ok bool // // The right eigenvector v_j of A corresponding to an eigenvalue λ_j // is defined by -// A v_j = λ_j v_j, +// +// A v_j = λ_j v_j, +// // and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by -// u_jᴴ A = λ_j u_jᴴ, +// +// u_jᴴ A = λ_j u_jᴴ, +// // where u_jᴴ is the conjugate transpose of u_j. // // On return, A will be overwritten and the left and right eigenvectors will be // stored, respectively, in the columns of the n×n matrices VL and VR in the // same order as their eigenvalues. If the j-th eigenvalue is real, then -// u_j = VL[:,j], -// v_j = VR[:,j], +// +// u_j = VL[:,j], +// v_j = VR[:,j], +// // and if it is not real, then j and j+1 form a complex conjugate pair and the // eigenvectors can be recovered as -// u_j = VL[:,j] + i*VL[:,j+1], -// u_{j+1} = VL[:,j] - i*VL[:,j+1], -// v_j = VR[:,j] + i*VR[:,j+1], -// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// +// u_j = VL[:,j] + i*VL[:,j+1], +// u_{j+1} = VL[:,j] - i*VL[:,j+1], +// v_j = VR[:,j] + i*VR[:,j+1], +// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// // where i is the imaginary unit. The computed eigenvectors are normalized to // have Euclidean norm equal to 1 and largest component real. // diff --git a/lapack/testlapack/dgeev.go b/lapack/testlapack/dgeev.go index 8ea32fcc..e098476d 100644 --- a/lapack/testlapack/dgeev.go +++ b/lapack/testlapack/dgeev.go @@ -729,7 +729,9 @@ func dgeevTestForAntisymRandom(n int, rnd *rand.Rand) dgeevTest { } // residualRightEV returns the residual -// | A E - E W|_1 / ( |A|_1 |E|_1 ) +// +// | A E - E W|_1 / ( |A|_1 |E|_1 ) +// // where the columns of E contain the right eigenvectors of A and W is a block diagonal matrix with // a 1×1 block for each real eigenvalue and a 2×2 block for each complex conjugate pair. func residualRightEV(a, e blas64.General, wr, wi []float64) float64 { @@ -783,7 +785,9 @@ func residualRightEV(a, e blas64.General, wr, wi []float64) float64 { } // residualLeftEV returns the residual -// | Aᵀ E - E Wᵀ|_1 / ( |Aᵀ|_1 |E|_1 ) +// +// | Aᵀ E - E Wᵀ|_1 / ( |Aᵀ|_1 |E|_1 ) +// // where the columns of E contain the left eigenvectors of A and W is a block diagonal matrix with // a 1×1 block for each real eigenvalue and a 2×2 block for each complex conjugate pair. func residualLeftEV(a, e blas64.General, wr, wi []float64) float64 { diff --git a/lapack/testlapack/dgesvd.go b/lapack/testlapack/dgesvd.go index 5107e439..7d9e2f26 100644 --- a/lapack/testlapack/dgesvd.go +++ b/lapack/testlapack/dgesvd.go @@ -34,13 +34,15 @@ func DgesvdTest(t *testing.T, impl Dgesvder, tol float64) { // dgesvdTest tests a Dgesvd implementation on an m×n matrix A generated // according to mtype as: -// - the zero matrix if mtype == 1, -// - the identity matrix if mtype == 2, -// - a random matrix with a given condition number and singular values if mtype == 3, 4, or 5. +// - the zero matrix if mtype == 1, +// - the identity matrix if mtype == 2, +// - a random matrix with a given condition number and singular values if mtype == 3, 4, or 5. +// // It first computes the full SVD A = U*Sigma*Vᵀ and checks that -// - U has orthonormal columns, and Vᵀ has orthonormal rows, -// - U*Sigma*Vᵀ multiply back to A, -// - the singular values are non-negative and sorted in decreasing order. 
+// - U has orthonormal columns, and Vᵀ has orthonormal rows, +// - U*Sigma*Vᵀ multiply back to A, +// - the singular values are non-negative and sorted in decreasing order. +// // Then all combinations of partial SVD results are computed and checked whether // they match the full SVD result. func dgesvdTest(t *testing.T, impl Dgesvder, m, n, mtype int, tol float64) { @@ -288,7 +290,9 @@ func dgesvdTest(t *testing.T, impl Dgesvder, m, n, mtype int, tol float64) { } // svdFullResidual returns -// |A - U*D*VT| / (n * aNorm) +// +// |A - U*D*VT| / (n * aNorm) +// // where U, D, and VT are as computed by Dgesvd with jobU = jobVT = lapack.SVDAll. func svdFullResidual(m, n int, aNorm float64, a []float64, lda int, u []float64, ldu int, d []float64, vt []float64, ldvt int) float64 { // The implementation follows TESTING/dbdt01.f from the reference. @@ -341,7 +345,9 @@ func svdFullResidual(m, n int, aNorm float64, a []float64, lda int, u []float64, // svdPartialUResidual compares U and URef to see if their columns span the same // spaces. It returns the maximum over columns of -// |URef(i) - S*U(i)| +// +// |URef(i) - S*U(i)| +// // where URef(i) and U(i) are the i-th columns of URef and U, respectively, and // S is ±1 chosen to minimize the expression. func svdPartialUResidual(m, n int, u, uRef []float64, ldu int) float64 { @@ -359,7 +365,9 @@ func svdPartialUResidual(m, n int, u, uRef []float64, ldu int) float64 { // svdPartialVTResidual compares VT and VTRef to see if their rows span the same // spaces. It returns the maximum over rows of -// |VTRef(i) - S*VT(i)| +// +// |VTRef(i) - S*VT(i)| +// // where VTRef(i) and VT(i) are the i-th columns of VTRef and VT, respectively, and // S is ±1 chosen to minimize the expression. func svdPartialVTResidual(m, n int, vt, vtRef []float64, ldvt int) float64 { diff --git a/lapack/testlapack/dlag2.go b/lapack/testlapack/dlag2.go index 2c43704e..ee250b95 100644 --- a/lapack/testlapack/dlag2.go +++ b/lapack/testlapack/dlag2.go @@ -146,9 +146,11 @@ func makeDlag2TestMatrix(rnd *rand.Rand, ld, kind int) blas64.General { } // residualDlag2 returns the value of -// | det( s*A - w*B ) | -// ------------------------------------------- -// max(s*norm(A), |w|*norm(B))*norm(s*A - w*B) +// +// | det( s*A - w*B ) | +// ------------------------------------------- +// max(s*norm(A), |w|*norm(B))*norm(s*A - w*B) +// // that can be used to check the generalized eigenvalues computed by Dlag2 and // an error that indicates invalid input data. func residualDlag2(a, b blas64.General, s float64, w complex128) (float64, error) { @@ -217,8 +219,9 @@ func scale(f float64, c complex128) complex128 { } // cmplxdet2x2 returns the determinant of -// |a11 a12| -// |a21 a22| +// +// |a11 a12| +// |a21 a22| func cmplxdet2x2(a11, a12, a21, a22 complex128) complex128 { return a11*a22 - a12*a21 } diff --git a/lapack/testlapack/dlatbs.go b/lapack/testlapack/dlatbs.go index 169e4a4b..df03ca11 100644 --- a/lapack/testlapack/dlatbs.go +++ b/lapack/testlapack/dlatbs.go @@ -107,7 +107,8 @@ func dlatbsTest(t *testing.T, impl Dlatbser, rnd *rand.Rand, kind int, uplo blas // dlatbsResidual returns the residual for the solution to a scaled triangular // system of equations A*x = s*b or Aᵀ*x = s*b when A is an n×n triangular // band matrix with kd super- or sub-diagonals. The residual is computed as -// norm( op(A)*x - scale*b ) / ( norm(op(A)) * norm(x) ). +// +// norm( op(A)*x - scale*b ) / ( norm(op(A)) * norm(x) ). // // This function corresponds to DTBT03 in Reference LAPACK. 
func dlatbsResidual(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, kd int, ab []float64, ldab int, scale float64, cnorm, b, x []float64) float64 { diff --git a/lapack/testlapack/dpbtrf.go b/lapack/testlapack/dpbtrf.go index a08b6e72..42487e50 100644 --- a/lapack/testlapack/dpbtrf.go +++ b/lapack/testlapack/dpbtrf.go @@ -65,8 +65,10 @@ func dpbtrfTest(t *testing.T, impl Dpbtrfer, uplo blas.Uplo, n, kd int, ldab int } // dsbmm computes a symmetric band matrix A -// A = Uᵀ*U if uplo == blas.Upper, -// A = L*Lᵀ if uplo == blas.Lower, +// +// A = Uᵀ*U if uplo == blas.Upper, +// A = L*Lᵀ if uplo == blas.Lower, +// // where U and L is an upper, respectively lower, triangular band matrix // stored on entry in ab. The result is stored in-place into ab. func dsbmm(uplo blas.Uplo, n, kd int, ab []float64, ldab int) { diff --git a/lapack/testlapack/dsteqr.go b/lapack/testlapack/dsteqr.go index e365d522..8f9d45f4 100644 --- a/lapack/testlapack/dsteqr.go +++ b/lapack/testlapack/dsteqr.go @@ -148,7 +148,9 @@ func DsteqrTest(t *testing.T, impl Dsteqrer) { // eigenDecompCorrect returns whether the eigen decomposition is correct. // It checks if -// A * v ≈ λ * v +// +// A * v ≈ λ * v +// // where the eigenvalues λ are stored in values, and the eigenvectors are stored // in the columns of v. func eigenDecompCorrect(values []float64, A, V blas64.General) bool { diff --git a/lapack/testlapack/dtrevc3.go b/lapack/testlapack/dtrevc3.go index 598a440f..dce3d8c0 100644 --- a/lapack/testlapack/dtrevc3.go +++ b/lapack/testlapack/dtrevc3.go @@ -362,7 +362,8 @@ func dtrevc3Test(t *testing.T, impl Dtrevc3er, side lapack.EVSide, n, extra int, } // residualEVNormalization returns the maximum normalization error in E: -// max |max-norm(E[:,j]) - 1| +// +// max |max-norm(E[:,j]) - 1| func residualEVNormalization(emat blas64.General, wi []float64) float64 { n := emat.Rows if n == 0 { diff --git a/lapack/testlapack/general.go b/lapack/testlapack/general.go index 1e2ae0ea..9dd14143 100644 --- a/lapack/testlapack/general.go +++ b/lapack/testlapack/general.go @@ -259,14 +259,14 @@ func randomSchurCanonical(n, stride int, bad bool, rnd *rand.Rand) (t blas64.Gen // blockedUpperTriGeneral returns a normal random, general matrix in the form // -// c-k-l k l -// A = k [ 0 A12 A13 ] if r-k-l >= 0; -// l [ 0 0 A23 ] -// r-k-l [ 0 0 0 ] +// c-k-l k l +// A = k [ 0 A12 A13 ] if r-k-l >= 0; +// l [ 0 0 A23 ] +// r-k-l [ 0 0 0 ] // -// c-k-l k l -// A = k [ 0 A12 A13 ] if r-k-l < 0; -// r-k [ 0 0 A23 ] +// c-k-l k l +// A = k [ 0 A12 A13 ] if r-k-l < 0; +// r-k [ 0 0 A23 ] // // where the k×k matrix A12 and l×l matrix is non-singular // upper triangular. A23 is l×l upper triangular if r-k-l >= 0, @@ -917,6 +917,7 @@ func constructQPBidiagonal(vect lapack.ApplyOrtho, m, n, nb int, a []float64, ld // printRowise prints the matrix with one row per line. This is useful for debugging. // If beyond is true, it prints beyond the final column to lda. If false, only // the columns are printed. +// //lint:ignore U1000 This is useful for debugging. func printRowise(a []float64, m, n, lda int, beyond bool) { for i := 0; i < m; i++ { @@ -1113,6 +1114,7 @@ func isSchurCanonicalGeneral(t blas64.General) bool { // schurBlockEigenvalues returns the two eigenvalues of the 2×2 matrix [a b; c d] // that must be in Schur canonical form. +// //lint:ignore U1000 This is useful for debugging. 
func schurBlockEigenvalues(a, b, c, d float64) (ev1, ev2 complex128) { if !isSchurCanonical(a, b, c, d) { @@ -1376,8 +1378,10 @@ func svdJobString(job lapack.SVDJob) string { } // residualOrthogonal returns the residual -// |I - Q * Qᵀ| if m < n or (m == n and rowwise == true), -// |I - Qᵀ * Q| otherwise. +// +// |I - Q * Qᵀ| if m < n or (m == n and rowwise == true), +// |I - Qᵀ * Q| otherwise. +// // It can be used to check that the matrix Q is orthogonal. func residualOrthogonal(q blas64.General, rowwise bool) float64 { m, n := q.Rows, q.Cols diff --git a/lapack/testlapack/matgen.go b/lapack/testlapack/matgen.go index c46b2cf9..5bc2deea 100644 --- a/lapack/testlapack/matgen.go +++ b/lapack/testlapack/matgen.go @@ -17,13 +17,15 @@ import ( // Dlatm1 computes the entries of dst as specified by mode, cond and rsign. // // mode describes how dst will be computed: -// |mode| == 1: dst[0] = 1 and dst[1:n] = 1/cond -// |mode| == 2: dst[:n-1] = 1/cond and dst[n-1] = 1 -// |mode| == 3: dst[i] = cond^{-i/(n-1)}, i=0,...,n-1 -// |mode| == 4: dst[i] = 1 - i*(1-1/cond)/(n-1) -// |mode| == 5: dst[i] = random number in the range (1/cond, 1) such that -// their logarithms are uniformly distributed -// |mode| == 6: dst[i] = random number from the distribution given by dist +// +// |mode| == 1: dst[0] = 1 and dst[1:n] = 1/cond +// |mode| == 2: dst[:n-1] = 1/cond and dst[n-1] = 1 +// |mode| == 3: dst[i] = cond^{-i/(n-1)}, i=0,...,n-1 +// |mode| == 4: dst[i] = 1 - i*(1-1/cond)/(n-1) +// |mode| == 5: dst[i] = random number in the range (1/cond, 1) such that +// their logarithms are uniformly distributed +// |mode| == 6: dst[i] = random number from the distribution given by dist +// // If mode is negative, the order of the elements of dst will be reversed. // For other values of mode Dlatm1 will panic. // @@ -31,9 +33,11 @@ import ( // or -1 with probability 0.5 // // dist specifies the type of distribution to be used when mode == ±6: -// dist == 1: Uniform[0,1) -// dist == 2: Uniform[-1,1) -// dist == 3: Normal(0,1) +// +// dist == 1: Uniform[0,1) +// dist == 2: Uniform[-1,1) +// dist == 3: Normal(0,1) +// // For other values of dist Dlatm1 will panic. // // rnd is used as a source of random numbers. @@ -124,12 +128,14 @@ func Dlatm1(dst []float64, mode int, cond float64, rsign bool, dist int, rnd *ra // Dlagsy generates an n×n symmetric matrix A, by pre- and post- multiplying a // real diagonal matrix D with a random orthogonal matrix: -// A = U * D * Uᵀ. +// +// A = U * D * Uᵀ. // // work must have length at least 2*n, otherwise Dlagsy will panic. // // The parameter k is unused but it must satisfy -// 0 <= k <= n-1. +// +// 0 <= k <= n-1. func Dlagsy(n, k int, d []float64, a []float64, lda int, rnd *rand.Rand, work []float64) { checkMatrix(n, n, a, lda) if k < 0 || max(0, n-1) < k { @@ -193,14 +199,16 @@ func Dlagsy(n, k int, d []float64, a []float64, lda int, rnd *rand.Rand, work [] // Dlagge generates a real general m×n matrix A, by pre- and post-multiplying // a real diagonal matrix D with random orthogonal matrices: -// A = U*D*V. +// +// A = U*D*V. // // d must have length min(m,n), and work must have length m+n, otherwise Dlagge // will panic. // // The parameters ku and kl are unused but they must satisfy -// 0 <= kl <= m-1, -// 0 <= ku <= n-1. +// +// 0 <= kl <= m-1, +// 0 <= ku <= n-1. 
func Dlagge(m, n, kl, ku int, d []float64, a []float64, lda int, rnd *rand.Rand, work []float64) { checkMatrix(m, n, a, lda) if kl < 0 || max(0, m-1) < kl { @@ -287,9 +295,11 @@ func Dlagge(m, n, kl, ku int, d []float64, a []float64, lda int, rnd *rand.Rand, // dlarnv fills dst with random numbers from a uniform or normal distribution // specified by dist: -// dist=1: uniform(0,1), -// dist=2: uniform(-1,1), -// dist=3: normal(0,1). +// +// dist=1: uniform(0,1), +// dist=2: uniform(-1,1), +// dist=3: normal(0,1). +// // For other values of dist dlarnv will panic. func dlarnv(dst []float64, dist int, rnd *rand.Rand) { switch dist { diff --git a/lapack/testlapack/test_matrices.go b/lapack/testlapack/test_matrices.go index d779e928..f6ce7fbe 100644 --- a/lapack/testlapack/test_matrices.go +++ b/lapack/testlapack/test_matrices.go @@ -13,9 +13,11 @@ import ( ) // A123 is the non-symmetric singular matrix -// [ 1 2 3 ] -// A = [ 4 5 6 ] -// [ 7 8 9 ] +// +// [ 1 2 3 ] +// A = [ 4 5 6 ] +// [ 7 8 9 ] +// // It has three distinct real eigenvalues. type A123 struct{} @@ -89,13 +91,17 @@ func (AntisymRandom) Eigenvalues() []complex128 { } // Circulant is a generally non-symmetric matrix given by -// A[i,j] = 1 + (j-i+n)%n. +// +// A[i,j] = 1 + (j-i+n)%n. +// // For example, for n=5, -// [ 1 2 3 4 5 ] -// [ 5 1 2 3 4 ] -// A = [ 4 5 1 2 3 ] -// [ 3 4 5 1 2 ] -// [ 2 3 4 5 1 ] +// +// [ 1 2 3 4 5 ] +// [ 5 1 2 3 4 ] +// A = [ 4 5 1 2 3 ] +// [ 3 4 5 1 2 ] +// [ 2 3 4 5 1 ] +// // It has real and complex eigenvalues, some possibly repeated. type Circulant int @@ -126,15 +132,19 @@ func (c Circulant) Eigenvalues() []complex128 { } // Clement is a generally non-symmetric matrix given by -// A[i,j] = i+1 if j == i+1, -// = n-i if j == i-1, -// = 0 otherwise. +// +// A[i,j] = i+1 if j == i+1, +// = n-i if j == i-1, +// = 0 otherwise. +// // For example, for n=5, -// [ . 1 . . . ] -// [ 4 . 2 . . ] -// A = [ . 3 . 3 . ] -// [ . . 2 . 4 ] -// [ . . . 1 . ] +// +// [ . 1 . . . ] +// [ 4 . 2 . . ] +// A = [ . 3 . 3 . ] +// [ . . 2 . 4 ] +// [ . . . 1 . ] +// // It has n distinct real eigenvalues. type Clement int @@ -162,14 +172,18 @@ func (c Clement) Eigenvalues() []complex128 { } // Creation is a singular non-symmetric matrix given by -// A[i,j] = i if j == i-1, -// = 0 otherwise. +// +// A[i,j] = i if j == i-1, +// = 0 otherwise. +// // For example, for n=5, -// [ . . . . . ] -// [ 1 . . . . ] -// A = [ . 2 . . . ] -// [ . . 3 . . ] -// [ . . . 4 . ] +// +// [ . . . . . ] +// [ 1 . . . . ] +// A = [ . 2 . . . ] +// [ . . 3 . . ] +// [ . . . 4 . ] +// // Zero is its only eigenvalue. type Creation int @@ -187,14 +201,18 @@ func (c Creation) Eigenvalues() []complex128 { } // Diagonal is a diagonal matrix given by -// A[i,j] = i+1 if i == j, -// = 0 otherwise. +// +// A[i,j] = i+1 if i == j, +// = 0 otherwise. +// // For example, for n=5, -// [ 1 . . . . ] -// [ . 2 . . . ] -// A = [ . . 3 . . ] -// [ . . . 4 . ] -// [ . . . . 5 ] +// +// [ 1 . . . . ] +// [ . 2 . . . ] +// A = [ . . 3 . . ] +// [ . . . 4 . ] +// [ . . . . 5 ] +// // It has n real eigenvalues {1,...,n}. type Diagonal int @@ -217,14 +235,18 @@ func (d Diagonal) Eigenvalues() []complex128 { } // Downshift is a non-singular upper Hessenberg matrix given by -// A[i,j] = 1 if (i-j+n)%n == 1, -// = 0 otherwise. +// +// A[i,j] = 1 if (i-j+n)%n == 1, +// = 0 otherwise. +// // For example, for n=5, -// [ . . . . 1 ] -// [ 1 . . . . ] -// A = [ . 1 . . . ] -// [ . . 1 . . ] -// [ . . . 1 . ] +// +// [ . . . . 1 ] +// [ 1 . . . . ] +// A = [ . 1 . . . 
] +// [ . . 1 . . ] +// [ . . . 1 . ] +// // Its eigenvalues are the complex roots of unity. type Downshift int @@ -244,11 +266,12 @@ func (d Downshift) Eigenvalues() []complex128 { // Fibonacci is an upper Hessenberg matrix with 3 distinct real eigenvalues. For // example, for n=5, -// [ . 1 . . . ] -// [ 1 1 . . . ] -// A = [ . 1 1 . . ] -// [ . . 1 1 . ] -// [ . . . 1 1 ] +// +// [ . 1 . . . ] +// [ 1 1 . . . ] +// A = [ . 1 1 . . ] +// [ . . 1 1 . ] +// [ . . . 1 1 ] type Fibonacci int func (f Fibonacci) Matrix() blas64.General { @@ -281,11 +304,12 @@ func (f Fibonacci) Eigenvalues() []complex128 { // Gear is a singular non-symmetric matrix with real eigenvalues. For example, // for n=5, -// [ . 1 . . 1 ] -// [ 1 . 1 . . ] -// A = [ . 1 . 1 . ] -// [ . . 1 . 1 ] -// [-1 . . 1 . ] +// +// [ . 1 . . 1 ] +// [ 1 . 1 . . ] +// A = [ . 1 . 1 . ] +// [ . . 1 . 1 ] +// [-1 . . 1 . ] type Gear int func (g Gear) Matrix() blas64.General { @@ -336,15 +360,19 @@ func (g Gear) Eigenvalues() []complex128 { } // Grcar is an upper Hessenberg matrix given by -// A[i,j] = -1 if i == j+1, -// = 1 if i <= j and j <= i+k, -// = 0 otherwise. +// +// A[i,j] = -1 if i == j+1, +// = 1 if i <= j and j <= i+k, +// = 0 otherwise. +// // For example, for n=5 and k=2, -// [ 1 1 1 . . ] -// [ -1 1 1 1 . ] -// A = [ . -1 1 1 1 ] -// [ . . -1 1 1 ] -// [ . . . -1 1 ] +// +// [ 1 1 1 . . ] +// [ -1 1 1 1 . ] +// A = [ . -1 1 1 1 ] +// [ . . -1 1 1 ] +// [ . . . -1 1 ] +// // The matrix has sensitive eigenvalues but they are not given explicitly. type Grcar struct { N int @@ -370,10 +398,12 @@ func (Grcar) Eigenvalues() []complex128 { } // Hanowa is a non-symmetric non-singular matrix of even order given by -// A[i,j] = alpha if i == j, -// = -i-1 if i < n/2 and j == i + n/2, -// = i+1-n/2 if i >= n/2 and j == i - n/2, -// = 0 otherwise. +// +// A[i,j] = alpha if i == j, +// = -i-1 if i < n/2 and j == i + n/2, +// = i+1-n/2 if i >= n/2 and j == i - n/2, +// = 0 otherwise. +// // The matrix has complex eigenvalues. type Hanowa struct { N int // Order of the matrix, must be even. @@ -412,15 +442,19 @@ func (h Hanowa) Eigenvalues() []complex128 { } // Lesp is a tridiagonal, generally non-symmetric matrix given by -// A[i,j] = -2*i-5 if i == j, -// = 1/(i+1) if i == j-1, -// = j+1 if i == j+1. +// +// A[i,j] = -2*i-5 if i == j, +// = 1/(i+1) if i == j-1, +// = j+1 if i == j+1. +// // For example, for n=5, -// [ -5 2 . . . ] -// [ 1/2 -7 3 . . ] -// A = [ . 1/3 -9 4 . ] -// [ . . 1/4 -11 5 ] -// [ . . . 1/5 -13 ]. +// +// [ -5 2 . . . ] +// [ 1/2 -7 3 . . ] +// A = [ . 1/3 -9 4 . ] +// [ . . 1/4 -11 5 ] +// [ . . . 1/5 -13 ]. +// // The matrix has sensitive eigenvalues but they are not given explicitly. type Lesp int @@ -444,10 +478,12 @@ func (Lesp) Eigenvalues() []complex128 { } // Rutis is the 4×4 non-symmetric matrix -// [ 4 -5 0 3 ] -// A = [ 0 4 -3 -5 ] -// [ 5 -3 4 0 ] -// [ 3 0 5 4 ] +// +// [ 4 -5 0 3 ] +// A = [ 0 4 -3 -5 ] +// [ 5 -3 4 0 ] +// [ 3 0 5 4 ] +// // It has two distinct real eigenvalues and a pair of complex eigenvalues. type Rutis struct{} @@ -470,9 +506,11 @@ func (Rutis) Eigenvalues() []complex128 { } // Tris is a tridiagonal matrix given by -// A[i,j] = x if i == j-1, -// = y if i == j, -// = z if i == j+1. +// +// A[i,j] = x if i == j-1, +// = y if i == j, +// = z if i == j+1. +// // If x*z is negative, the matrix has complex eigenvalues. 
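These test matrices are consumed through their Matrix and Eigenvalues methods, both visible in the hunks above. A brief sketch with the Fibonacci type; the import path gonum.org/v1/gonum/lapack/testlapack is assumed from the file location.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/lapack/testlapack"
	)

	func main() {
		f := testlapack.Fibonacci(5)
		a := f.Matrix()       // dense blas64.General form of the 5×5 matrix shown above
		ev := f.Eigenvalues() // known eigenvalues for checking an eigensolver
		fmt.Println(a.Rows, a.Cols, len(ev))
	}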
type Tris struct { N int diff --git a/mat/band.go b/mat/band.go index d6bfdfd9..7660cdaa 100644 --- a/mat/band.go +++ b/mat/band.go @@ -119,19 +119,22 @@ func (t TransposeBand) UntransposeBand() Banded { // // The data must be arranged in row-major order constructed by removing the zeros // from the rows outside the band and aligning the diagonals. For example, the matrix -// 1 2 3 0 0 0 -// 4 5 6 7 0 0 -// 0 8 9 10 11 0 -// 0 0 12 13 14 15 -// 0 0 0 16 17 18 -// 0 0 0 0 19 20 +// +// 1 2 3 0 0 0 +// 4 5 6 7 0 0 +// 0 8 9 10 11 0 +// 0 0 12 13 14 15 +// 0 0 0 16 17 18 +// 0 0 0 0 19 20 +// // becomes (* entries are never accessed) -// * 1 2 3 +// - 1 2 3 // 4 5 6 7 // 8 9 10 11 -// 12 13 14 15 -// 16 17 18 * -// 19 20 * * +// 12 13 14 15 +// 16 17 18 * +// 19 20 * * +// // which is passed to NewBandDense as []float64{*, 1, 2, 3, 4, ...} with kl=1 and ku=2. // Only the values in the band portion of the matrix are used. func NewBandDense(r, c, kl, ku int, data []float64) *BandDense { @@ -291,9 +294,10 @@ func (b *BandDense) Zero() { } // Norm returns the specified norm of the receiver. Valid norms are: -// 1 - The maximum absolute column sum -// 2 - The Frobenius norm, the square root of the sum of the squares of the elements -// Inf - The maximum absolute row sum +// +// 1 - The maximum absolute column sum +// 2 - The Frobenius norm, the square root of the sum of the squares of the elements +// Inf - The maximum absolute row sum // // Norm will panic with ErrNormOrder if an illegal norm is specified and with // ErrZeroLength if the matrix has zero size. diff --git a/mat/cholesky.go b/mat/cholesky.go index d2deea56..0f957cdd 100644 --- a/mat/cholesky.go +++ b/mat/cholesky.go @@ -316,7 +316,9 @@ func (c *Cholesky) RawU() Triangular { // UTo stores into dst the n×n upper triangular matrix U from a Cholesky // decomposition -// A = Uᵀ * U. +// +// A = Uᵀ * U. +// // If dst is empty, it is resized to be an n×n upper triangular matrix. When dst // is non-empty, UTo panics if dst is not n×n or not Upper. UTo will also panic // if the receiver does not contain a successful factorization. @@ -341,7 +343,9 @@ func (c *Cholesky) UTo(dst *TriDense) { // LTo stores into dst the n×n lower triangular matrix L from a Cholesky // decomposition -// A = L * Lᵀ. +// +// A = L * Lᵀ. +// // If dst is empty, it is resized to be an n×n lower triangular matrix. When dst // is non-empty, LTo panics if dst is not n×n or not Lower. LTo will also panic // if the receiver does not contain a successful factorization. @@ -446,9 +450,13 @@ func (c *Cholesky) InverseTo(dst *SymDense) error { // Scale multiplies the original matrix A by a positive constant using // its Cholesky decomposition, storing the result in-place into the receiver. // That is, if the original Cholesky factorization is -// Uᵀ * U = A +// +// Uᵀ * U = A +// // the updated factorization is -// U'ᵀ * U' = f A = A' +// +// U'ᵀ * U' = f A = A' +// // Scale panics if the constant is non-positive, or if the receiver is non-empty // and is of a different size from the input. func (c *Cholesky) Scale(f float64, orig *Cholesky) { @@ -470,8 +478,10 @@ func (c *Cholesky) Scale(f float64, orig *Cholesky) { // ExtendVecSym computes the Cholesky decomposition of the original matrix A, // whose Cholesky decomposition is in a, extended by a the n×1 vector v according to -// [A w] -// [w' k] +// +// [A w] +// [w' k] +// // where k = v[n-1] and w = v[:n-1]. The result is stored into the receiver. 
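A hedged sketch of the UTo and LTo flow documented above; Factorize(a Symmetric) bool is assumed as the entry point and the 2×2 data is illustrative.

	package main

	import (
		"fmt"
		"log"

		"gonum.org/v1/gonum/mat"
	)

	func main() {
		a := mat.NewSymDense(2, []float64{
			4, 2,
			2, 3,
		})
		var chol mat.Cholesky
		if ok := chol.Factorize(a); !ok {
			log.Fatal("a is not positive definite")
		}
		var u, l mat.TriDense
		chol.UTo(&u) // upper factor, A = Uᵀ * U
		chol.LTo(&l) // lower factor, A = L * Lᵀ
		fmt.Println(mat.Formatted(&u))
	}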
// In order for the updated matrix to be positive definite, it must be the case // that k > w' A^-1 w. If this condition does not hold then ExtendVecSym will @@ -533,9 +543,12 @@ func (c *Cholesky) ExtendVecSym(a *Cholesky, v Vector) (ok bool) { // SymRankOne performs a rank-1 update of the original matrix A and refactorizes // its Cholesky factorization, storing the result into the receiver. That is, if // in the original Cholesky factorization -// Uᵀ * U = A, +// +// Uᵀ * U = A, +// // in the updated factorization -// U'ᵀ * U' = A + alpha * x * xᵀ = A'. +// +// U'ᵀ * U' = A + alpha * x * xᵀ = A'. // // Note that when alpha is negative, the updating problem may be ill-conditioned // and the results may be inaccurate, or the updated matrix A' may not be diff --git a/mat/dense.go b/mat/dense.go index f2245c5b..5ec425fa 100644 --- a/mat/dense.go +++ b/mat/dense.go @@ -594,9 +594,10 @@ func (m *Dense) Trace() float64 { } // Norm returns the specified norm of the receiver. Valid norms are: -// 1 - The maximum absolute column sum -// 2 - The Frobenius norm, the square root of the sum of the squares of the elements -// Inf - The maximum absolute row sum +// +// 1 - The maximum absolute column sum +// 2 - The Frobenius norm, the square root of the sum of the squares of the elements +// Inf - The maximum absolute row sum // // Norm will panic with ErrNormOrder if an illegal norm is specified and with // ErrShape if the matrix has zero size. diff --git a/mat/dense_arithmetic.go b/mat/dense_arithmetic.go index d0c66509..259ee13d 100644 --- a/mat/dense_arithmetic.go +++ b/mat/dense_arithmetic.go @@ -777,7 +777,8 @@ func (m *Dense) Apply(fn func(i, j int, v float64) float64, a Matrix) { // RankOne performs a rank-one update to the matrix a with the vectors x and // y, where x and y are treated as column vectors. The result is stored in the // receiver. The Outer method can be used instead of RankOne if a is not needed. -// m = a + alpha * x * yᵀ +// +// m = a + alpha * x * yᵀ func (m *Dense) RankOne(a Matrix, alpha float64, x, y Vector) { ar, ac := a.Dims() if x.Len() != ar { @@ -832,7 +833,9 @@ func (m *Dense) RankOne(a Matrix, alpha float64, x, y Vector) { // Outer calculates the outer product of the vectors x and y, where x and y // are treated as column vectors, and stores the result in the receiver. -// m = alpha * x * yᵀ +// +// m = alpha * x * yᵀ +// // In order to update an existing matrix, see RankOne. func (m *Dense) Outer(alpha float64, x, y Vector) { r, c := x.Len(), y.Len() diff --git a/mat/diagonal.go b/mat/diagonal.go index 830488b7..c42f70c8 100644 --- a/mat/diagonal.go +++ b/mat/diagonal.go @@ -319,9 +319,10 @@ func (d *DiagDense) Trace() float64 { } // Norm returns the specified norm of the receiver. Valid norms are: -// 1 or Inf - The maximum diagonal element magnitude -// 2 - The Frobenius norm, the square root of the sum of the squares of -// the diagonal elements +// +// 1 or Inf - The maximum diagonal element magnitude +// 2 - The Frobenius norm, the square root of the sum of the squares of +// the diagonal elements // // Norm will panic with ErrNormOrder if an illegal norm is specified and with // ErrZeroLength if the receiver has zero size. diff --git a/mat/doc.go b/mat/doc.go index fcd19122..f8c078cf 100644 --- a/mat/doc.go +++ b/mat/doc.go @@ -5,39 +5,48 @@ // Package mat provides implementations of float64 and complex128 matrix // structures and linear algebra operations on them. // -// Overview +// # Overview // // This section provides a quick overview of the mat package. 
The following // sections provide more in depth commentary. // // mat provides: -// - Interfaces for Matrix classes (Matrix, Symmetric, Triangular) -// - Concrete implementations (Dense, SymDense, TriDense, VecDense) -// - Methods and functions for using matrix data (Add, Trace, SymRankOne) -// - Types for constructing and using matrix factorizations (QR, LU, etc.) -// - The complementary types for complex matrices, CMatrix, CSymDense, etc. +// - Interfaces for Matrix classes (Matrix, Symmetric, Triangular) +// - Concrete implementations (Dense, SymDense, TriDense, VecDense) +// - Methods and functions for using matrix data (Add, Trace, SymRankOne) +// - Types for constructing and using matrix factorizations (QR, LU, etc.) +// - The complementary types for complex matrices, CMatrix, CSymDense, etc. +// // In the documentation below, we use "matrix" as a short-hand for all of // the FooDense types implemented in this package. We use "Matrix" to // refer to the Matrix interface. // // A matrix may be constructed through the corresponding New function. If no // backing array is provided the matrix will be initialized to all zeros. -// // Allocate a zeroed real matrix of size 3×5 -// zero := mat.NewDense(3, 5, nil) +// +// // Allocate a zeroed real matrix of size 3×5 +// zero := mat.NewDense(3, 5, nil) +// // If a backing data slice is provided, the matrix will have those elements. // All matrices are stored in row-major format and users should consider // this when expressing matrix arithmetic to ensure optimal performance. -// // Generate a 6×6 matrix of random values. -// data := make([]float64, 36) -// for i := range data { -// data[i] = rand.NormFloat64() -// } -// a := mat.NewDense(6, 6, data) +// +// // Generate a 6×6 matrix of random values. +// data := make([]float64, 36) +// for i := range data { +// data[i] = rand.NormFloat64() +// } +// a := mat.NewDense(6, 6, data) +// // Operations involving matrix data are implemented as functions when the values // of the matrix remain unchanged -// tr := mat.Trace(a) +// +// tr := mat.Trace(a) +// // and are implemented as methods when the operation modifies the receiver. -// zero.Copy(a) +// +// zero.Copy(a) +// // Note that the input arguments to most functions and methods are interfaces // rather than concrete types `func Trace(Matrix)` rather than // `func Trace(*Dense)` allowing flexible use of internal and external @@ -47,7 +56,7 @@ // the operation will panic if the matrix is not the correct size. // An exception to this is when the destination is empty (see below). // -// Empty matrix +// # Empty matrix // // An empty matrix is one that has zero size. Empty matrices are used to allow // the destination of a matrix operation to assume the correct size automatically. @@ -55,15 +64,17 @@ // new data if necessary. The IsEmpty method returns whether the given matrix // is empty. The zero-value of a matrix is empty, and is useful for easily // getting the result of matrix operations. -// var c mat.Dense // construct a new zero-value matrix -// c.Mul(a, a) // c is automatically adjusted to be the right size +// +// var c mat.Dense // construct a new zero-value matrix +// c.Mul(a, a) // c is automatically adjusted to be the right size +// // The Reset method can be used to revert a matrix to an empty matrix. // Reset should not be used when multiple different matrices share the same backing // data slice. This can cause unexpected data modifications after being resized. 
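As a runnable restatement of the empty-destination convention described above (no new behaviour is claimed and the data is arbitrary):

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/mat"
	)

	func main() {
		a := mat.NewDense(2, 2, []float64{
			1, 2,
			3, 4,
		})
		var c mat.Dense // the zero value is an empty matrix
		c.Mul(a, a)     // c is automatically resized to 2×2
		fmt.Println(mat.Formatted(&c))
		fmt.Println(mat.Trace(&c)) // functions leave their inputs unchanged
	}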
// An empty matrix can not be sliced even if it does have an adequately sized // backing data slice, but can be expanded using its Grow method if it exists. // -// The Matrix Interfaces +// # The Matrix Interfaces // // The Matrix interface is the common link between the concrete types of real // matrices. The Matrix interface is defined by three functions: Dims, which @@ -71,11 +82,15 @@ // specified location, and T for returning a Transpose (discussed later). All of // the matrix types can perform these behaviors and so implement the interface. // Methods and functions are designed to use this interface, so in particular the method -// func (m *Dense) Mul(a, b Matrix) +// +// func (m *Dense) Mul(a, b Matrix) +// // constructs a *Dense from the result of a multiplication with any Matrix types, // not just *Dense. Where more restrictive requirements must be met, there are also // additional interfaces like Symmetric and Triangular. For example, in -// func (s *SymDense) AddSym(a, b Symmetric) +// +// func (s *SymDense) AddSym(a, b Symmetric) +// // the Symmetric interface guarantees a symmetric result. // // The CMatrix interface plays the same role for complex matrices. The difference @@ -90,20 +105,22 @@ // see the Transpose and Conjugate types for more details. Note that some // operations have a transpose as part of their definition, as in *SymDense.SymOuterK. // -// Matrix Factorization +// # Matrix Factorization // // Matrix factorizations, such as the LU decomposition, typically have their own // specific data storage, and so are each implemented as a specific type. The // factorization can be computed through a call to Factorize -// var lu mat.LU -// lu.Factorize(a) +// +// var lu mat.LU +// lu.Factorize(a) +// // The elements of the factorization can be extracted through methods on the // factorized type, for example *LU.UTo. The factorization types can also be used // directly, as in *Cholesky.SolveTo. Some factorizations can be updated directly, // without needing to update the original matrix and refactorize, for example with // *LU.RankOne. // -// BLAS and LAPACK +// # BLAS and LAPACK // // BLAS and LAPACK are the standard APIs for linear algebra routines. Many // operations in mat are implemented using calls to the wrapper functions @@ -115,12 +132,14 @@ // a cgo BLAS implementation is registered, the lapack64 calls will be partially // executed in Go and partially executed in C. // -// Type Switching +// # Type Switching // // The Matrix abstraction enables efficiency as well as interoperability. Go's // type reflection capabilities are used to choose the most efficient routine // given the specific concrete types. For example, in -// c.Mul(a, b) +// +// c.Mul(a, b) +// // if a and b both implement RawMatrixer, that is, they can be represented as a // blas64.General, blas64.Gemm (general matrix multiplication) is called, while // instead if b is a RawSymmetricer blas64.Symm is used (general-symmetric @@ -132,19 +151,21 @@ // value. If there are specific special cases that are needed, please submit a // pull-request or file an issue. // -// Invariants +// # Invariants // // Matrix input arguments to package functions are never directly modified. If an // operation changes Matrix data, the mutated matrix will be the receiver of a // method, or will be the first, dst, argument to a method named with a To suffix. // // For convenience, a matrix may be used as both a receiver and as an input, e.g. 
-// a.Pow(a, 6) -// v.SolveVec(a.T(), v) +// +// a.Pow(a, 6) +// v.SolveVec(a.T(), v) +// // though in many cases this will cause an allocation (see Element Aliasing). // An exception to this rule is Copy, which does not allow a.Copy(a.T()). // -// Element Aliasing +// # Element Aliasing // // Most methods in mat modify receiver data. It is forbidden for the modified // data region of the receiver to overlap the used data area of the input @@ -163,17 +184,17 @@ // // mat will use the following rules to detect overlap between the receiver and one // of the inputs: -// - the input implements one of the Raw methods, and -// - the address ranges of the backing data slices overlap, and -// - the strides differ or there is an overlap in the used data elements. +// - the input implements one of the Raw methods, and +// - the address ranges of the backing data slices overlap, and +// - the strides differ or there is an overlap in the used data elements. +// // If such an overlap is detected, the method will panic. // // The following cases will not panic: -// - the data slices do not overlap, -// - there is pointer identity between the receiver and input values after -// the value has been untransposed if necessary. +// - the data slices do not overlap, +// - there is pointer identity between the receiver and input values after +// the value has been untransposed if necessary. // // mat will not attempt to detect element overlap if the input does not implement a // Raw method. Method behavior is undefined if there is undetected overlap. -// package mat // import "gonum.org/v1/gonum/mat" diff --git a/mat/eigen.go b/mat/eigen.go index 85c56fc5..69f8eb57 100644 --- a/mat/eigen.go +++ b/mat/eigen.go @@ -25,7 +25,9 @@ type EigenSym struct { // Factorize computes the eigenvalue decomposition of the symmetric matrix a. // The Eigen decomposition is defined as -// A = P * D * P^-1 +// +// A = P * D * P^-1 +// // where D is a diagonal matrix containing the eigenvalues of the matrix, and // P is a matrix of the eigenvectors of A. Factorize computes the eigenvalues // in ascending order. If the vectors input argument is false, the eigenvectors @@ -150,12 +152,16 @@ func (e *Eigen) succFact() bool { // the eigenvectors. // // A right eigenvalue/eigenvector combination is defined by -// A * x_r = λ * x_r +// +// A * x_r = λ * x_r +// // where x_r is the column vector called an eigenvector, and λ is the corresponding // eigenvalue. // // Similarly, a left eigenvalue/eigenvector combination is defined by -// x_l * A = λ * x_l +// +// x_l * A = λ * x_l +// // The eigenvalues, but not the eigenvectors, are the same for both decompositions. // // Typically eigenvectors refer to right eigenvectors. @@ -274,11 +280,15 @@ func (e *Eigen) Values(dst []complex128) []complex128 { // The columns of the returned n×n dense matrix contain the eigenvectors of the // decomposition in the same order as the eigenvalues. // If the j-th eigenvalue is real, then -// dst[:,j] = d[:,j], +// +// dst[:,j] = d[:,j], +// // and if it is not real, then the elements of the j-th and (j+1)-th columns of d // form complex conjugate pairs and the eigenvectors are recovered as -// dst[:,j] = d[:,j] + i*d[:,j+1], -// dst[:,j+1] = d[:,j] - i*d[:,j+1], +// +// dst[:,j] = d[:,j] + i*d[:,j+1], +// dst[:,j+1] = d[:,j] - i*d[:,j+1], +// // where i is the imaginary unit. 
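For the symmetric case the decomposition above reduces to real eigenvalues and orthonormal eigenvectors. A sketch using EigenSym, assuming VectorsTo accepts an empty destination like the other *To methods; the 2×2 data is illustrative.

	package main

	import (
		"fmt"
		"log"

		"gonum.org/v1/gonum/mat"
	)

	func main() {
		a := mat.NewSymDense(2, []float64{
			2, 1,
			1, 2,
		})
		var es mat.EigenSym
		if ok := es.Factorize(a, true); !ok {
			log.Fatal("symmetric eigendecomposition failed")
		}
		fmt.Println(es.Values(nil)) // [1 3], ascending order
		var v mat.Dense
		es.VectorsTo(&v) // columns hold the corresponding eigenvectors
		fmt.Println(mat.Formatted(&v))
	}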
func (e *Eigen) complexEigenTo(dst *CDense, d *Dense) { r, c := d.Dims() diff --git a/mat/gsvd.go b/mat/gsvd.go index d3639026..02286207 100644 --- a/mat/gsvd.go +++ b/mat/gsvd.go @@ -62,9 +62,11 @@ func (gsvd *GSVD) succFact() bool { // input kind. // // The full singular value decomposition (kind == GSVDAll) deconstructs A and B as -// A = U * Σ₁ * [ 0 R ] * Qᵀ // -// B = V * Σ₂ * [ 0 R ] * Qᵀ +// A = U * Σ₁ * [ 0 R ] * Qᵀ +// +// B = V * Σ₂ * [ 0 R ] * Qᵀ +// // where Σ₁ and Σ₂ are r×(k+l) and p×(k+l) diagonal matrices of singular values, and // U, V and Q are r×r, p×p and c×c orthogonal matrices of singular vectors. k+l is the // effective numerical rank of the matrix [ Aᵀ Bᵀ ]ᵀ. diff --git a/mat/hogsvd.go b/mat/hogsvd.go index ab53685d..40a03315 100644 --- a/mat/hogsvd.go +++ b/mat/hogsvd.go @@ -33,12 +33,12 @@ func (gsvd *HOGSVD) succFact() bool { // of the n input r_i×c column tall matrices in m. HOGSV extends the GSVD case from 2 to n // input matrices. // -// M_0 = U_0 * Σ_0 * Vᵀ -// M_1 = U_1 * Σ_1 * Vᵀ -// . -// . -// . -// M_{n-1} = U_{n-1} * Σ_{n-1} * Vᵀ +// M_0 = U_0 * Σ_0 * Vᵀ +// M_1 = U_1 * Σ_1 * Vᵀ +// . +// . +// . +// M_{n-1} = U_{n-1} * Σ_{n-1} * Vᵀ // // where U_i are r_i×c matrices of singular vectors, Σ are c×c matrices singular values, and V // is a c×c matrix of singular vectors. diff --git a/mat/inner.go b/mat/inner.go index 7607cf9a..4f94a96a 100644 --- a/mat/inner.go +++ b/mat/inner.go @@ -11,7 +11,9 @@ import ( ) // Inner computes the generalized inner product -// xᵀ A y +// +// xᵀ A y +// // between the vectors x and y with matrix A, where x and y are treated as // column vectors. // diff --git a/mat/io.go b/mat/io.go index 9ac04f26..0641fa28 100644 --- a/mat/io.go +++ b/mat/io.go @@ -51,20 +51,21 @@ var ( // MarshalBinary encodes the receiver into a binary form and returns the result. // // Dense is little-endian encoded as follows: -// 0 - 3 Version = 1 (uint32) -// 4 'G' (byte) -// 5 'F' (byte) -// 6 'A' (byte) -// 7 0 (byte) -// 8 - 15 number of rows (int64) -// 16 - 23 number of columns (int64) -// 24 - 31 0 (int64) -// 32 - 39 0 (int64) -// 40 - .. matrix data elements (float64) -// [0,0] [0,1] ... [0,ncols-1] -// [1,0] [1,1] ... [1,ncols-1] -// ... -// [nrows-1,0] ... [nrows-1,ncols-1] +// +// 0 - 3 Version = 1 (uint32) +// 4 'G' (byte) +// 5 'F' (byte) +// 6 'A' (byte) +// 7 0 (byte) +// 8 - 15 number of rows (int64) +// 16 - 23 number of columns (int64) +// 24 - 31 0 (int64) +// 32 - 39 0 (int64) +// 40 - .. matrix data elements (float64) +// [0,0] [0,1] ... [0,ncols-1] +// [1,0] [1,1] ... [1,ncols-1] +// ... +// [nrows-1,0] ... [nrows-1,ncols-1] func (m Dense) MarshalBinary() ([]byte, error) { bufLen := int64(headerSize) + int64(m.mat.Rows)*int64(m.mat.Cols)*int64(sizeFloat64) if bufLen <= 0 { @@ -132,10 +133,11 @@ func (m Dense) MarshalBinaryTo(w io.Writer) (int, error) { // See MarshalBinary for the on-disk layout. // // Limited checks on the validity of the binary input are performed: -// - ErrShape is returned if the number of rows or columns is negative, -// - an error is returned if the resulting Dense matrix is too -// big for the current architecture (e.g. a 16GB matrix written by a -// 64b application and read back from a 32b application.) +// - ErrShape is returned if the number of rows or columns is negative, +// - an error is returned if the resulting Dense matrix is too +// big for the current architecture (e.g. a 16GB matrix written by a +// 64b application and read back from a 32b application.) 
+// // UnmarshalBinary does not limit the size of the unmarshaled matrix, and so // it should not be used on untrusted data. func (m *Dense) UnmarshalBinary(data []byte) error { @@ -191,10 +193,11 @@ func (m *Dense) UnmarshalBinary(data []byte) error { // See MarshalBinary for the on-disk layout. // // Limited checks on the validity of the binary input are performed: -// - ErrShape is returned if the number of rows or columns is negative, -// - an error is returned if the resulting Dense matrix is too -// big for the current architecture (e.g. a 16GB matrix written by a -// 64b application and read back from a 32b application.) +// - ErrShape is returned if the number of rows or columns is negative, +// - an error is returned if the resulting Dense matrix is too +// big for the current architecture (e.g. a 16GB matrix written by a +// 64b application and read back from a 32b application.) +// // UnmarshalBinary does not limit the size of the unmarshaled matrix, and so // it should not be used on untrusted data. func (m *Dense) UnmarshalBinaryFrom(r io.Reader) (int, error) { @@ -247,16 +250,16 @@ func (m *Dense) UnmarshalBinaryFrom(r io.Reader) (int, error) { // // VecDense is little-endian encoded as follows: // -// 0 - 3 Version = 1 (uint32) -// 4 'G' (byte) -// 5 'F' (byte) -// 6 'A' (byte) -// 7 0 (byte) -// 8 - 15 number of elements (int64) -// 16 - 23 1 (int64) -// 24 - 31 0 (int64) -// 32 - 39 0 (int64) -// 40 - .. vector's data elements (float64) +// 0 - 3 Version = 1 (uint32) +// 4 'G' (byte) +// 5 'F' (byte) +// 6 'A' (byte) +// 7 0 (byte) +// 8 - 15 number of elements (int64) +// 16 - 23 1 (int64) +// 24 - 31 0 (int64) +// 32 - 39 0 (int64) +// 40 - .. vector's data elements (float64) func (v VecDense) MarshalBinary() ([]byte, error) { bufLen := int64(headerSize) + int64(v.mat.N)*int64(sizeFloat64) if bufLen <= 0 { @@ -318,10 +321,11 @@ func (v VecDense) MarshalBinaryTo(w io.Writer) (int, error) { // See MarshalBinary for the on-disk layout. // // Limited checks on the validity of the binary input are performed: -// - ErrShape is returned if the number of rows is negative, -// - an error is returned if the resulting VecDense is too -// big for the current architecture (e.g. a 16GB vector written by a -// 64b application and read back from a 32b application.) +// - ErrShape is returned if the number of rows is negative, +// - an error is returned if the resulting VecDense is too +// big for the current architecture (e.g. a 16GB vector written by a +// 64b application and read back from a 32b application.) +// // UnmarshalBinary does not limit the size of the unmarshaled vector, and so // it should not be used on untrusted data. func (v *VecDense) UnmarshalBinary(data []byte) error { diff --git a/mat/lq.go b/mat/lq.go index 18679da4..4d6da5ee 100644 --- a/mat/lq.go +++ b/mat/lq.go @@ -170,8 +170,10 @@ func (lq *LQ) QTo(dst *Dense) { // See the documentation for Condition for more information. // // The minimization problem solved depends on the input parameters. -// If trans == false, find the minimum norm solution of A * X = B. -// If trans == true, find X such that ||A*X - B||_2 is minimized. +// +// If trans == false, find the minimum norm solution of A * X = B. +// If trans == true, find X such that ||A*X - B||_2 is minimized. +// // The solution matrix, X, is stored in place into dst. // SolveTo will panic if the receiver does not contain a factorization. 
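A sketch of the trans == false branch above, the minimum-norm solution of an underdetermined system; the 2×3 data is made up for illustration.

	package main

	import (
		"fmt"
		"log"

		"gonum.org/v1/gonum/mat"
	)

	func main() {
		a := mat.NewDense(2, 3, []float64{
			1, 2, 3,
			4, 5, 6,
		})
		b := mat.NewDense(2, 1, []float64{7, 8})
		var lq mat.LQ
		lq.Factorize(a)
		var x mat.Dense
		if err := lq.SolveTo(&x, false, b); err != nil {
			log.Fatalf("solve failed: %v", err)
		}
		fmt.Println(mat.Formatted(&x)) // 3×1 minimum-norm solution of A*X = B
	}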
func (lq *LQ) SolveTo(dst *Dense, trans bool, b Matrix) error { diff --git a/mat/lu.go b/mat/lu.go index a70b71a4..4ec424f9 100644 --- a/mat/lu.go +++ b/mat/lu.go @@ -334,8 +334,10 @@ func (m *Dense) Permutation(r int, swaps []int) { // SolveTo solves a system of linear equations using the LU decomposition of a matrix. // It computes -// A * X = B if trans == false -// Aᵀ * X = B if trans == true +// +// A * X = B if trans == false +// Aᵀ * X = B if trans == true +// // In both cases, A is represented in LU factorized form, and the matrix X is // stored into dst. // @@ -382,8 +384,10 @@ func (lu *LU) SolveTo(dst *Dense, trans bool, b Matrix) error { // SolveVecTo solves a system of linear equations using the LU decomposition of a matrix. // It computes -// A * x = b if trans == false -// Aᵀ * x = b if trans == true +// +// A * x = b if trans == false +// Aᵀ * x = b if trans == true +// // In both cases, A is represented in LU factorized form, and the vector x is // stored into dst. // diff --git a/mat/matrix.go b/mat/matrix.go index 5ad22ca5..51093aaa 100644 --- a/mat/matrix.go +++ b/mat/matrix.go @@ -757,17 +757,19 @@ func Min(a Matrix) float64 { } // A Normer can compute a norm of the matrix. Valid norms are: -// 1 - The maximum absolute column sum -// 2 - The Frobenius norm, the square root of the sum of the squares of the elements -// Inf - The maximum absolute row sum +// +// 1 - The maximum absolute column sum +// 2 - The Frobenius norm, the square root of the sum of the squares of the elements +// Inf - The maximum absolute row sum type Normer interface { Norm(norm float64) float64 } // Norm returns the specified norm of the matrix A. Valid norms are: -// 1 - The maximum absolute column sum -// 2 - The Frobenius norm, the square root of the sum of the squares of the elements -// Inf - The maximum absolute row sum +// +// 1 - The maximum absolute column sum +// 2 - The Frobenius norm, the square root of the sum of the squares of the elements +// Inf - The maximum absolute row sum // // If a is a Normer, its Norm method will be used to calculate the norm. // diff --git a/mat/qr.go b/mat/qr.go index 510a02ce..f54bcc86 100644 --- a/mat/qr.go +++ b/mat/qr.go @@ -165,8 +165,10 @@ func (qr *QR) QTo(dst *Dense) { // See the documentation for Condition for more information. // // The minimization problem solved depends on the input parameters. -// If trans == false, find X such that ||A*X - B||_2 is minimized. -// If trans == true, find the minimum norm solution of Aᵀ * X = B. +// +// If trans == false, find X such that ||A*X - B||_2 is minimized. +// If trans == true, find the minimum norm solution of Aᵀ * X = B. +// // The solution matrix, X, is stored in place into dst. // SolveTo will panic if the receiver does not contain a factorization. func (qr *QR) SolveTo(dst *Dense, trans bool, b Matrix) error { @@ -232,7 +234,9 @@ func (qr *QR) SolveTo(dst *Dense, trans bool, b Matrix) error { } // SolveVecTo finds a minimum-norm solution to a system of linear equations, -// Ax = b. +// +// Ax = b. +// // See QR.SolveTo for the full documentation. // SolveVecTo will panic if the receiver does not contain a factorization. 
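The QR counterpart flips the roles: with trans == false it minimizes ||A*x - b||_2 for an overdetermined system, as documented above. A sketch with illustrative data:

	package main

	import (
		"fmt"
		"log"

		"gonum.org/v1/gonum/mat"
	)

	func main() {
		a := mat.NewDense(3, 2, []float64{
			1, 1,
			1, 2,
			1, 3,
		})
		b := mat.NewVecDense(3, []float64{1, 2, 2})
		var qr mat.QR
		qr.Factorize(a)
		var x mat.VecDense
		if err := qr.SolveVecTo(&x, false, b); err != nil {
			log.Fatalf("solve failed: %v", err)
		}
		fmt.Println(mat.Formatted(&x)) // 2-element least-squares solution
	}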
func (qr *QR) SolveVecTo(dst *VecDense, trans bool, b Vector) error { diff --git a/mat/solve.go b/mat/solve.go index 8ac380fe..48331a23 100644 --- a/mat/solve.go +++ b/mat/solve.go @@ -11,10 +11,13 @@ import ( ) // Solve solves the linear least squares problem -// minimize over x |b - A*x|_2 +// +// minimize over x |b - A*x|_2 +// // where A is an m×n matrix A, b is a given m element vector and x is n element // solution vector. Solve assumes that A has full rank, that is -// rank(A) = min(m,n) +// +// rank(A) = min(m,n) // // If m >= n, Solve finds the unique least squares solution of an overdetermined // system. @@ -115,10 +118,13 @@ func (m *Dense) Solve(a, b Matrix) error { } // SolveVec solves the linear least squares problem -// minimize over x |b - A*x|_2 +// +// minimize over x |b - A*x|_2 +// // where A is an m×n matrix A, b is a given m element vector and x is n element // solution vector. Solve assumes that A has full rank, that is -// rank(A) = min(m,n) +// +// rank(A) = min(m,n) // // If m >= n, Solve finds the unique least squares solution of an overdetermined // system. diff --git a/mat/svd.go b/mat/svd.go index 648015dc..5244d9f6 100644 --- a/mat/svd.go +++ b/mat/svd.go @@ -57,7 +57,9 @@ func (svd *SVD) succFact() bool { // // The full singular value decomposition (kind == SVDFull) is a factorization // of an m×n matrix A of the form -// A = U * Σ * Vᵀ +// +// A = U * Σ * Vᵀ +// // where Σ is an m×n diagonal matrix, U is an m×m orthogonal matrix, and V is an // n×n orthogonal matrix. The diagonal elements of Σ are the singular values of A. // The first min(m,n) columns of U and V are, respectively, the left and right @@ -66,7 +68,9 @@ func (svd *SVD) succFact() bool { // Significant storage space can be saved by using the thin representation of // the SVD (kind == SVDThin) instead of the full SVD, especially if // m >> n or m << n. The thin SVD finds -// A = U~ * Σ * V~ᵀ +// +// A = U~ * Σ * V~ᵀ +// // where U~ is of size m×min(m,n), Σ is a diagonal matrix of size min(m,n)×min(m,n) // and V~ is of size n×min(m,n). // @@ -273,10 +277,14 @@ func (svd *SVD) VTo(dst *Dense) { } // SolveTo calculates the minimum-norm solution to a linear least squares problem -// minimize over n-element vectors x: |b - A*x|_2 and |x|_2 +// +// minimize over n-element vectors x: |b - A*x|_2 and |x|_2 +// // where b is a given m-element vector, using the SVD of m×n matrix A stored in // the receiver. A may be rank-deficient, that is, the given effective rank can be -// rank ≤ min(m,n) +// +// rank ≤ min(m,n) +// // The rank can be computed using SVD.Rank. // // Several right-hand side vectors b and solution vectors x can be handled in a @@ -353,10 +361,14 @@ func (m repVector) At(i, j int) float64 { func (m repVector) T() Matrix { return Transpose{m} } // SolveVecTo calculates the minimum-norm solution to a linear least squares problem -// minimize over n-element vectors x: |b - A*x|_2 and |x|_2 +// +// minimize over n-element vectors x: |b - A*x|_2 and |x|_2 +// // where b is a given m-element vector, using the SVD of m×n matrix A stored in // the receiver. A may be rank-deficient, that is, the given effective rank can be -// rank ≤ min(m,n) +// +// rank ≤ min(m,n) +// // The rank can be computed using SVD.Rank. // // The resulting vector x will be stored in dst. 
dst must be either empty or diff --git a/mat/symband.go b/mat/symband.go index 6d7b00be..63638ea9 100644 --- a/mat/symband.go +++ b/mat/symband.go @@ -67,19 +67,23 @@ type RawSymBander interface { // The data must be arranged in row-major order constructed by removing the zeros // from the rows outside the band and aligning the diagonals. SymBandDense matrices // are stored in the upper triangle. For example, the matrix -// 1 2 3 0 0 0 -// 2 4 5 6 0 0 -// 3 5 7 8 9 0 -// 0 6 8 10 11 12 -// 0 0 9 11 13 14 -// 0 0 0 12 14 15 +// +// 1 2 3 0 0 0 +// 2 4 5 6 0 0 +// 3 5 7 8 9 0 +// 0 6 8 10 11 12 +// 0 0 9 11 13 14 +// 0 0 0 12 14 15 +// // becomes (* entries are never accessed) -// 1 2 3 -// 4 5 6 -// 7 8 9 -// 10 11 12 -// 13 14 * -// 15 * * +// +// 1 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 * +// 15 * * +// // which is passed to NewSymBandDense as []float64{1, 2, ..., 15, *, *, *} with k=2. // Only the values in the band portion of the matrix are used. func NewSymBandDense(n, k int, data []float64) *SymBandDense { @@ -245,9 +249,10 @@ func (s *SymBandDense) DoColNonZero(j int, fn func(i, j int, v float64)) { } // Norm returns the specified norm of the receiver. Valid norms are: -// 1 - The maximum absolute column sum -// 2 - The Frobenius norm, the square root of the sum of the squares of the elements -// Inf - The maximum absolute row sum +// +// 1 - The maximum absolute column sum +// 2 - The Frobenius norm, the square root of the sum of the squares of the elements +// Inf - The maximum absolute row sum // // Norm will panic with ErrNormOrder if an illegal norm is specified and with // ErrZeroLength if the matrix has zero size. diff --git a/mat/symmetric.go b/mat/symmetric.go index ec9b75cf..e38e4c7b 100644 --- a/mat/symmetric.go +++ b/mat/symmetric.go @@ -322,7 +322,8 @@ func (s *SymDense) CopySym(a Symmetric) int { // SymRankOne performs a symmetric rank-one update to the matrix a with x, // which is treated as a column vector, and stores the result in the receiver -// s = a + alpha * x * xᵀ +// +// s = a + alpha * x * xᵀ func (s *SymDense) SymRankOne(a Symmetric, alpha float64, x Vector) { n := x.Len() if a.SymmetricDim() != n { @@ -355,7 +356,8 @@ func (s *SymDense) SymRankOne(a Symmetric, alpha float64, x Vector) { // SymRankK performs a symmetric rank-k update to the matrix a and stores the // result into the receiver. If a is zero, see SymOuterK. -// s = a + alpha * x * x' +// +// s = a + alpha * x * x' func (s *SymDense) SymRankK(a Symmetric, alpha float64, x Matrix) { n := a.SymmetricDim() r, _ := x.Dims() @@ -387,7 +389,9 @@ func (s *SymDense) SymRankK(a Symmetric, alpha float64, x Matrix) { // SymOuterK calculates the outer product of x with itself and stores // the result into the receiver. It is equivalent to the matrix // multiplication -// s = alpha * x * x'. +// +// s = alpha * x * x'. +// // In order to update an existing matrix, see SymRankOne. func (s *SymDense) SymOuterK(alpha float64, x Matrix) { n, _ := x.Dims() @@ -433,7 +437,8 @@ func (s *SymDense) SymOuterK(alpha float64, x Matrix) { // RankTwo performs a symmetric rank-two update to the matrix a with the // vectors x and y, which are treated as column vectors, and stores the // result in the receiver -// m = a + alpha * (x * yᵀ + y * xᵀ) +// +// m = a + alpha * (x * yᵀ + y * xᵀ) func (s *SymDense) RankTwo(a Symmetric, alpha float64, x, y Vector) { n := s.mat.N if x.Len() != n { @@ -580,9 +585,10 @@ func (s *SymDense) sliceSym(i, k int) *SymDense { } // Norm returns the specified norm of the receiver. 
Valid norms are: -// 1 - The maximum absolute column sum -// 2 - The Frobenius norm, the square root of the sum of the squares of the elements -// Inf - The maximum absolute row sum +// +// 1 - The maximum absolute column sum +// 2 - The Frobenius norm, the square root of the sum of the squares of the elements +// Inf - The maximum absolute row sum // // Norm will panic with ErrNormOrder if an illegal norm is specified and with // ErrZeroLength if the matrix has zero size. diff --git a/mat/triangular.go b/mat/triangular.go index 581b5d8d..743fd38c 100644 --- a/mat/triangular.go +++ b/mat/triangular.go @@ -622,9 +622,10 @@ func (t *TriDense) sliceTri(i, k int) *TriDense { } // Norm returns the specified norm of the receiver. Valid norms are: -// 1 - The maximum absolute column sum -// 2 - The Frobenius norm, the square root of the sum of the squares of the elements -// Inf - The maximum absolute row sum +// +// 1 - The maximum absolute column sum +// 2 - The Frobenius norm, the square root of the sum of the squares of the elements +// Inf - The maximum absolute row sum // // Norm will panic with ErrNormOrder if an illegal norm is specified and with // ErrZeroLength if the matrix has zero size. diff --git a/mat/triband.go b/mat/triband.go index 73495ac4..9d6b3401 100644 --- a/mat/triband.go +++ b/mat/triband.go @@ -168,35 +168,43 @@ type TriBandDense struct { // The data must be arranged in row-major order constructed by removing the zeros // from the rows outside the band and aligning the diagonals. For example, if // the upper-triangular banded matrix -// 1 2 3 0 0 0 -// 0 4 5 6 0 0 -// 0 0 7 8 9 0 -// 0 0 0 10 11 12 -// 0 0 0 0 13 14 -// 0 0 0 0 0 15 +// +// 1 2 3 0 0 0 +// 0 4 5 6 0 0 +// 0 0 7 8 9 0 +// 0 0 0 10 11 12 +// 0 0 0 0 13 14 +// 0 0 0 0 0 15 +// // becomes (* entries are never accessed) -// 1 2 3 -// 4 5 6 -// 7 8 9 -// 10 11 12 -// 13 14 * -// 15 * * +// +// 1 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 * +// 15 * * +// // which is passed to NewTriBandDense as []float64{1, 2, ..., 15, *, *, *} // with k=2 and kind = mat.Upper. // The lower triangular banded matrix -// 1 0 0 0 0 0 -// 2 3 0 0 0 0 -// 4 5 6 0 0 0 -// 0 7 8 9 0 0 -// 0 0 10 11 12 0 -// 0 0 0 13 14 15 +// +// 1 0 0 0 0 0 +// 2 3 0 0 0 0 +// 4 5 6 0 0 0 +// 0 7 8 9 0 0 +// 0 0 10 11 12 0 +// 0 0 0 13 14 15 +// // becomes (* entries are never accessed) -// * * 1 -// * 2 3 -// 4 5 6 -// 7 8 9 -// 10 11 12 -// 13 14 15 +// +// * * 1 +// * 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 15 +// // which is passed to NewTriBandDense as []float64{*, *, *, 1, 2, ..., 15} // with k=2 and kind = mat.Lower. // Only the values in the band portion of the matrix are used. @@ -324,6 +332,7 @@ func (t *TriBandDense) reuseAsZeroed(n, k int, kind TriKind) { // with the given bandwidth and orientation. If the receiver is not empty, // reuseAsZeroed checks that the receiver has the correct size, bandwidth and // orientation. +// //lint:ignore U1000 This will be used later. func (t *TriBandDense) reuseAsNonZeroed(n, k int, kind TriKind) { // reuseAsNonZeroed must be kept in sync with reuseAsZeroed. @@ -520,9 +529,10 @@ func (t *TriBandDense) DiagView() Diagonal { } // Norm returns the specified norm of the receiver.
Valid norms are: -// 1 - The maximum absolute column sum -// 2 - The Frobenius norm, the square root of the sum of the squares of the elements -// Inf - The maximum absolute row sum +// +// 1 - The maximum absolute column sum +// 2 - The Frobenius norm, the square root of the sum of the squares of the elements +// Inf - The maximum absolute row sum // // Norm will panic with ErrNormOrder if an illegal norm is specified and with // ErrZeroLength if the matrix has zero size. diff --git a/mat/tridiag.go b/mat/tridiag.go index 4309e711..80a1bd4a 100644 --- a/mat/tridiag.go +++ b/mat/tridiag.go @@ -187,9 +187,10 @@ func (a *Tridiag) Trace() float64 { } // Norm returns the specified norm of the receiver. Valid norms are: -// 1 - The maximum absolute column sum -// 2 - The Frobenius norm, the square root of the sum of the squares of the elements -// Inf - The maximum absolute row sum +// +// 1 - The maximum absolute column sum +// 2 - The Frobenius norm, the square root of the sum of the squares of the elements +// Inf - The maximum absolute row sum // // Norm will panic with ErrNormOrder if an illegal norm is specified and with // ErrZeroLength if the matrix has zero size. diff --git a/mat/vector.go b/mat/vector.go index f7cbbc96..2035e809 100644 --- a/mat/vector.go +++ b/mat/vector.go @@ -265,9 +265,10 @@ func (v *VecDense) CopyVec(a Vector) int { } // Norm returns the specified norm of the receiver. Valid norms are: -// 1 - The sum of the element magnitudes -// 2 - The Euclidean norm, the square root of the sum of the squares of the elements -// Inf - The maximum element magnitude +// +// 1 - The sum of the element magnitudes +// 2 - The Euclidean norm, the square root of the sum of the squares of the elements +// Inf - The maximum element magnitude // // Norm will panic with ErrNormOrder if an illegal norm is specified and with // ErrZeroLength if the vector has zero size. diff --git a/mathext/airy.go b/mathext/airy.go index d1865a9d..f2904b47 100644 --- a/mathext/airy.go +++ b/mathext/airy.go @@ -8,7 +8,9 @@ import "gonum.org/v1/gonum/mathext/internal/amos" // AiryAi returns the value of the Airy function at z. The Airy function here, // Ai(z), is one of the two linearly independent solutions to -// y′′ - y*z = 0. +// +// y′′ - y*z = 0. +// // See http://mathworld.wolfram.com/AiryFunctions.html for more detailed information. func AiryAi(z complex128) complex128 { // id specifies the order of the derivative to compute, @@ -23,7 +25,9 @@ func AiryAi(z complex128) complex128 { // AiryAiDeriv returns the value of the derivative of the Airy function at z. The // Airy function here, Ai(z), is one of the two linearly independent solutions to -// y′′ - y*z = 0. +// +// y′′ - y*z = 0. +// // See http://mathworld.wolfram.com/AiryFunctions.html for more detailed information. func AiryAiDeriv(z complex128) complex128 { // id specifies the order of the derivative to compute, diff --git a/mathext/beta.go b/mathext/beta.go index c1c5348e..203ec577 100644 --- a/mathext/beta.go +++ b/mathext/beta.go @@ -7,13 +7,16 @@ package mathext import "gonum.org/v1/gonum/mathext/internal/gonum" // Beta returns the value of the complete beta function B(a, b). It is defined as -// Γ(a)Γ(b) / Γ(a+b) +// +// Γ(a)Γ(b) / Γ(a+b) +// // Special cases are: -// B(a,b) returns NaN if a or b is Inf -// B(a,b) returns NaN if a and b are 0 -// B(a,b) returns NaN if a or b is NaN -// B(a,b) returns NaN if a or b is < 0 -// B(a,b) returns +Inf if a xor b is 0. 
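As a quick numerical check of the definitions above (a sketch with arbitrarily chosen arguments): B(2,3) = Γ(2)Γ(3)/Γ(5) = 1/12, and Lbeta should agree with the logarithm of that value.

	package main

	import (
		"fmt"
		"math"

		"gonum.org/v1/gonum/mathext"
	)

	func main() {
		a, b := 2.0, 3.0

		// Complete beta function and the defining Gamma-function ratio.
		got := mathext.Beta(a, b)
		want := math.Gamma(a) * math.Gamma(b) / math.Gamma(a+b)

		fmt.Printf("Beta(2,3)  = %.6f (Γ ratio %.6f)\n", got, want)
		fmt.Printf("Lbeta(2,3) = %.6f (log %.6f)\n", mathext.Lbeta(a, b), math.Log(want))
	}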
+// +// B(a,b) returns NaN if a or b is Inf +// B(a,b) returns NaN if a and b are 0 +// B(a,b) returns NaN if a or b is NaN +// B(a,b) returns NaN if a or b is < 0 +// B(a,b) returns +Inf if a xor b is 0. // // See http://mathworld.wolfram.com/BetaFunction.html for more detailed informations. func Beta(a, b float64) float64 { @@ -22,13 +25,16 @@ func Beta(a, b float64) float64 { // Lbeta returns the natural logarithm of the complete beta function B(a,b). // Lbeta is defined as: -// Ln(Γ(a)Γ(b)/Γ(a+b)) +// +// Ln(Γ(a)Γ(b)/Γ(a+b)) +// // Special cases are: -// Lbeta(a,b) returns NaN if a or b is Inf -// Lbeta(a,b) returns NaN if a and b are 0 -// Lbeta(a,b) returns NaN if a or b is NaN -// Lbeta(a,b) returns NaN if a or b is < 0 -// Lbeta(a,b) returns +Inf if a xor b is 0. +// +// Lbeta(a,b) returns NaN if a or b is Inf +// Lbeta(a,b) returns NaN if a and b are 0 +// Lbeta(a,b) returns NaN if a or b is NaN +// Lbeta(a,b) returns NaN if a or b is < 0 +// Lbeta(a,b) returns +Inf if a xor b is 0. func Lbeta(a, b float64) float64 { return gonum.Lbeta(a, b) } diff --git a/mathext/betainc.go b/mathext/betainc.go index 42d8dac2..9a0c61a9 100644 --- a/mathext/betainc.go +++ b/mathext/betainc.go @@ -8,8 +8,10 @@ import "gonum.org/v1/gonum/mathext/internal/cephes" // RegIncBeta returns the value of the regularized incomplete beta function // I(x;a,b). It is defined as -// I(x;a,b) = B(x;a,b) / B(a,b) -// = Γ(a+b) / (Γ(a)*Γ(b)) * int_0^x u^(a-1) * (1-u)^(b-1) du. +// +// I(x;a,b) = B(x;a,b) / B(a,b) +// = Γ(a+b) / (Γ(a)*Γ(b)) * int_0^x u^(a-1) * (1-u)^(b-1) du. +// // The domain of definition is 0 <= x <= 1, and the parameters a and b must be positive. // For other values of x, a, and b RegIncBeta will panic. func RegIncBeta(a, b float64, x float64) float64 { @@ -18,7 +20,9 @@ func RegIncBeta(a, b float64, x float64) float64 { // InvRegIncBeta computes the inverse of the regularized incomplete beta function. // It returns the x for which -// y = I(x;a,b) +// +// y = I(x;a,b) +// // The domain of definition is 0 <= y <= 1, and the parameters a and b must be // positive. For other values of x, a, and b InvRegIncBeta will panic. func InvRegIncBeta(a, b float64, y float64) float64 { diff --git a/mathext/digamma.go b/mathext/digamma.go index 73092e4e..67ebf007 100644 --- a/mathext/digamma.go +++ b/mathext/digamma.go @@ -9,7 +9,8 @@ import ( ) // Digamma returns the logorithmic derivative of the gamma function at x. -// ψ(x) = d/dx (Ln (Γ(x)). +// +// ψ(x) = d/dx (Ln (Γ(x)). func Digamma(x float64) float64 { // This is adapted from // http://web.science.mq.edu.au/~mjohnson/code/digamma.c diff --git a/mathext/ell_carlson.go b/mathext/ell_carlson.go index dd698eb6..1334f6b9 100644 --- a/mathext/ell_carlson.go +++ b/mathext/ell_carlson.go @@ -9,15 +9,19 @@ import ( ) // EllipticRF computes the symmetric elliptic integral R_F(x,y,z): -// R_F(x,y,z) = (1/2)\int_{0}^{\infty}{1/s(t)} dt, -// s(t) = \sqrt{(t+x)(t+y)(t+z)}. +// +// R_F(x,y,z) = (1/2)\int_{0}^{\infty}{1/s(t)} dt, +// s(t) = \sqrt{(t+x)(t+y)(t+z)}. // // The arguments x, y, z must satisfy the following conditions, otherwise the function returns math.NaN(): -// 0 ≤ x,y,z ≤ upper, -// lower ≤ x+y,y+z,z+x, +// +// 0 ≤ x,y,z ≤ upper, +// lower ≤ x+y,y+z,z+x, +// // where: -// lower = 5/(2^1022) = 1.112536929253601e-307, -// upper = (2^1022)/5 = 8.988465674311580e+306. +// +// lower = 5/(2^1022) = 1.112536929253601e-307, +// upper = (2^1022)/5 = 8.988465674311580e+306. 
// // The definition of the symmetric elliptic integral R_F can be found in NIST // Digital Library of Mathematical Functions (http://dlmf.nist.gov/19.16.E1). @@ -67,16 +71,20 @@ func EllipticRF(x, y, z float64) float64 { } // EllipticRD computes the symmetric elliptic integral R_D(x,y,z): -// R_D(x,y,z) = (1/2)\int_{0}^{\infty}{1/(s(t)(t+z))} dt, -// s(t) = \sqrt{(t+x)(t+y)(t+z)}. +// +// R_D(x,y,z) = (1/2)\int_{0}^{\infty}{1/(s(t)(t+z))} dt, +// s(t) = \sqrt{(t+x)(t+y)(t+z)}. // // The arguments x, y, z must satisfy the following conditions, otherwise the function returns math.NaN(): -// 0 ≤ x,y ≤ upper, -// lower ≤ z ≤ upper, -// lower ≤ x+y, +// +// 0 ≤ x,y ≤ upper, +// lower ≤ z ≤ upper, +// lower ≤ x+y, +// // where: -// lower = (5/(2^1022))^(1/3) = 4.809554074311679e-103, -// upper = ((2^1022)/5)^(1/3) = 2.079194837087086e+102. +// +// lower = (5/(2^1022))^(1/3) = 4.809554074311679e-103, +// upper = ((2^1022)/5)^(1/3) = 2.079194837087086e+102. // // The definition of the symmetric elliptic integral R_D can be found in NIST // Digital Library of Mathematical Functions (http://dlmf.nist.gov/19.16.E5). @@ -129,10 +137,12 @@ func EllipticRD(x, y, z float64) float64 { } // EllipticF computes the Legendre's elliptic integral of the 1st kind F(phi,m), 0≤m<1: -// F(\phi,m) = \int_{0}^{\phi} 1 / \sqrt{1-m\sin^2(\theta)} d\theta +// +// F(\phi,m) = \int_{0}^{\phi} 1 / \sqrt{1-m\sin^2(\theta)} d\theta // // Legendre's elliptic integrals can be expressed as symmetric elliptic integrals, in this case: -// F(\phi,m) = \sin\phi R_F(\cos^2\phi,1-m\sin^2\phi,1) +// +// F(\phi,m) = \sin\phi R_F(\cos^2\phi,1-m\sin^2\phi,1) // // The definition of F(phi,k) where k=sqrt(m) can be found in NIST Digital Library of Mathematical // Functions (http://dlmf.nist.gov/19.2.E4). @@ -142,10 +152,12 @@ func EllipticF(phi, m float64) float64 { } // EllipticE computes the Legendre's elliptic integral of the 2nd kind E(phi,m), 0≤m<1: -// E(\phi,m) = \int_{0}^{\phi} \sqrt{1-m\sin^2(\theta)} d\theta +// +// E(\phi,m) = \int_{0}^{\phi} \sqrt{1-m\sin^2(\theta)} d\theta // // Legendre's elliptic integrals can be expressed as symmetric elliptic integrals, in this case: -// E(\phi,m) = \sin\phi R_F(\cos^2\phi,1-m\sin^2\phi,1)-(m/3)\sin^3\phi R_D(\cos^2\phi,1-m\sin^2\phi,1) +// +// E(\phi,m) = \sin\phi R_F(\cos^2\phi,1-m\sin^2\phi,1)-(m/3)\sin^3\phi R_D(\cos^2\phi,1-m\sin^2\phi,1) // // The definition of E(phi,k) where k=sqrt(m) can be found in NIST Digital Library of Mathematical // Functions (http://dlmf.nist.gov/19.2.E5). diff --git a/mathext/ell_complete.go b/mathext/ell_complete.go index f5a176a7..bdba081a 100644 --- a/mathext/ell_complete.go +++ b/mathext/ell_complete.go @@ -10,7 +10,7 @@ import ( // CompleteK computes the complete elliptic integral of the 1st kind, 0≤m≤1. It returns math.NaN() if m is not in [0,1]. // -// K(m) = \int_{0}^{π/2} 1/{\sqrt{1-m{\sin^2θ}}} dθ +// K(m) = \int_{0}^{π/2} 1/{\sqrt{1-m{\sin^2θ}}} dθ func CompleteK(m float64) float64 { // Reference: // Toshio Fukushima, Precise and fast computation of complete elliptic integrals @@ -100,7 +100,7 @@ func CompleteK(m float64) float64 { // CompleteE computes the complete elliptic integral of the 2nd kind, 0≤m≤1. It returns math.NaN() if m is not in [0,1]. 
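A small sketch tying the functions above together: for an arbitrary parameter m, the complete integral K(m) should agree with the Legendre form F(π/2, m) and with the Carlson form R_F(0, 1-m, 1).

	package main

	import (
		"fmt"
		"math"

		"gonum.org/v1/gonum/mathext"
	)

	func main() {
		m := 0.64 // arbitrary parameter in [0,1)

		k := mathext.CompleteK(m)
		f := mathext.EllipticF(math.Pi/2, m) // Legendre form at phi = π/2
		rf := mathext.EllipticRF(0, 1-m, 1)  // Carlson symmetric form

		fmt.Printf("K(m)           = %.12f\n", k)
		fmt.Printf("F(π/2, m)      = %.12f\n", f)
		fmt.Printf("R_F(0, 1-m, 1) = %.12f\n", rf)
	}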
// -// E(m) = \int_{0}^{π/2} {\sqrt{1-m{\sin^2θ}}} dθ +// E(m) = \int_{0}^{π/2} {\sqrt{1-m{\sin^2θ}}} dθ func CompleteE(m float64) float64 { // Reference: // Toshio Fukushima, Precise and fast computation of complete elliptic integrals @@ -183,7 +183,7 @@ func CompleteE(m float64) float64 { // CompleteB computes an associate complete elliptic integral of the 2nd kind, 0≤m≤1. It returns math.NaN() if m is not in [0,1]. // -// B(m) = \int_{0}^{π/2} {\cos^2θ} / {\sqrt{1-m{\sin^2θ}}} dθ +// B(m) = \int_{0}^{π/2} {\cos^2θ} / {\sqrt{1-m{\sin^2θ}}} dθ func CompleteB(m float64) float64 { // Reference: // Toshio Fukushima, Precise and fast computation of complete elliptic integrals @@ -266,7 +266,7 @@ func CompleteB(m float64) float64 { // CompleteD computes an associate complete elliptic integral of the 2nd kind, 0≤m≤1. It returns math.NaN() if m is not in [0,1]. // -// D(m) = \int_{0}^{π/2} {\sin^2θ} / {\sqrt{1-m{\sin^2θ}}} dθ +// D(m) = \int_{0}^{π/2} {\sin^2θ} / {\sqrt{1-m{\sin^2θ}}} dθ func CompleteD(m float64) float64 { // Reference: // Toshio Fukushima, Precise and fast computation of complete elliptic integrals diff --git a/mathext/ell_complete_test.go b/mathext/ell_complete_test.go index 153e0424..ed79bdfb 100644 --- a/mathext/ell_complete_test.go +++ b/mathext/ell_complete_test.go @@ -32,6 +32,7 @@ func TestCompleteKE(t *testing.T) { // for m=0.0001(0.0001)0.9999. // // K(m) and E(m) can be computed without cancellation problems as following: +// // K(m) = B(m) + D(m), // E(m) = B(m) + (1-m)D(m). func TestCompleteBD(t *testing.T) { diff --git a/mathext/gamma_inc.go b/mathext/gamma_inc.go index 77bbcfe1..c4abe2c2 100644 --- a/mathext/gamma_inc.go +++ b/mathext/gamma_inc.go @@ -9,7 +9,9 @@ import ( ) // GammaIncReg computes the regularized incomplete Gamma integral. -// GammaIncReg(a,x) = (1/ Γ(a)) \int_0^x e^{-t} t^{a-1} dt +// +// GammaIncReg(a,x) = (1/ Γ(a)) \int_0^x e^{-t} t^{a-1} dt +// // The input argument a must be positive and x must be non-negative or GammaIncReg // will panic. // @@ -21,8 +23,10 @@ func GammaIncReg(a, x float64) float64 { } // GammaIncRegComp computes the complemented regularized incomplete Gamma integral. -// GammaIncRegComp(a,x) = 1 - GammaIncReg(a,x) -// = (1/ Γ(a)) \int_x^\infty e^{-t} t^{a-1} dt +// +// GammaIncRegComp(a,x) = 1 - GammaIncReg(a,x) +// = (1/ Γ(a)) \int_x^\infty e^{-t} t^{a-1} dt +// // The input argument a must be positive and x must be non-negative or // GammaIncRegComp will panic. func GammaIncRegComp(a, x float64) float64 { @@ -31,7 +35,9 @@ func GammaIncRegComp(a, x float64) float64 { // GammaIncRegInv computes the inverse of the regularized incomplete Gamma integral. That is, // it returns the x such that: -// GammaIncReg(a, x) = y +// +// GammaIncReg(a, x) = y +// // The input argument a must be positive and y must be between 0 and 1 // inclusive or GammaIncRegInv will panic. GammaIncRegInv should return a positive // number, but can return NaN if there is a failure to converge. @@ -41,7 +47,9 @@ func GammaIncRegInv(a, y float64) float64 { // GammaIncRegCompInv computes the inverse of the complemented regularized incomplete Gamma // integral. That is, it returns the x such that: -// GammaIncRegComp(a, x) = y +// +// GammaIncRegComp(a, x) = y +// // The input argument a must be positive and y must be between 0 and 1 // inclusive or GammaIncRegCompInv will panic. GammaIncRegCompInv should return a // positive number, but can return 0 even with non-zero y due to underflow. 
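A short sketch exercising the regularized incomplete Gamma functions documented above (a and x are arbitrary positive values): the two halves should sum to one, and the inverses should recover x.

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/mathext"
	)

	func main() {
		a, x := 3.0, 2.5

		p := mathext.GammaIncReg(a, x)     // P(a, x)
		q := mathext.GammaIncRegComp(a, x) // Q(a, x) = 1 - P(a, x)

		fmt.Printf("P(a,x) = %.10f\n", p)
		fmt.Printf("Q(a,x) = %.10f\n", q)
		fmt.Printf("P + Q  = %.10f\n", p+q)

		// The inverse functions recover x from the regularized values.
		fmt.Printf("GammaIncRegInv(a, P)     = %.10f\n", mathext.GammaIncRegInv(a, p))
		fmt.Printf("GammaIncRegCompInv(a, Q) = %.10f\n", mathext.GammaIncRegCompInv(a, q))
	}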
diff --git a/mathext/gamma_inc_inv.go b/mathext/gamma_inc_inv.go index 24a0e6f6..175cb6bc 100644 --- a/mathext/gamma_inc_inv.go +++ b/mathext/gamma_inc_inv.go @@ -24,7 +24,9 @@ func gammaIncReg(x float64, params []float64) float64 { // gammaIncRegInv is the inverse of the regularized incomplete Gamma integral. That is, it // returns x such that: -// Igam(a, x) = y +// +// Igam(a, x) = y +// // The input argument a must be positive and y must be between 0 and 1 // inclusive or gammaIncRegInv will panic. gammaIncRegInv should return a // positive number, but can return NaN if there is a failure to converge. diff --git a/mathext/internal/amos/amos.go b/mathext/internal/amos/amos.go index 57f2bdec..c9dbd0bf 100644 --- a/mathext/internal/amos/amos.go +++ b/mathext/internal/amos/amos.go @@ -1381,7 +1381,9 @@ FourtyFive: // underflow when y is scaled by tol. // // y enters as a scaled quantity whose magnitude is greater than -// 1e3 + 3*dmach(1)/tol +// +// 1e3 + 3*dmach(1)/tol +// // y is accepted if the underflow is at least one precision below the magnitude // of the largest component. Otherwise an underflow is assumed as the phase angle // does not have sufficient accuracy. @@ -1402,8 +1404,8 @@ func Zuchk(y complex128, scale, tol float64) int { // ZACAI APPLIES THE ANALYTIC CONTINUATION FORMULA // -// K(FNU,ZN*EXP(MP))=K(FNU,ZN)*EXP(-MP*FNU) - MP*I(FNU,ZN) -// MP=PI*MR*CMPLX(0.0,1.0) +// K(FNU,ZN*EXP(MP))=K(FNU,ZN)*EXP(-MP*FNU) - MP*I(FNU,ZN) +// MP=PI*MR*CMPLX(0.0,1.0) // // TO CONTINUE THE K FUNCTION FROM THE RIGHT HALF TO THE LEFT // HALF Z PLANE FOR USE WITH ZAIRY WHERE FNU=1/3 OR 2/3 AND N=1. diff --git a/mathext/internal/amos/origcode_test.go b/mathext/internal/amos/origcode_test.go index 6f128f73..35a6f81f 100644 --- a/mathext/internal/amos/origcode_test.go +++ b/mathext/internal/amos/origcode_test.go @@ -1380,8 +1380,8 @@ func zuchkOrig(YR, YI float64, NZ int, ASCLE, TOL float64) (YRout, YIout float64 // ZACAI APPLIES THE ANALYTIC CONTINUATION FORMULA // -// K(FNU,ZN*EXP(MP))=K(FNU,ZN)*EXP(-MP*FNU) - MP*I(FNU,ZN) -// MP=PI*MR*CMPLX(0.0,1.0) +// K(FNU,ZN*EXP(MP))=K(FNU,ZN)*EXP(-MP*FNU) - MP*I(FNU,ZN) +// MP=PI*MR*CMPLX(0.0,1.0) // // TO CONTINUE THE K FUNCTION FROM THE RIGHT HALF TO THE LEFT // HALF Z PLANE FOR USE WITH ZAIRY WHERE FNU=1/3 OR 2/3 AND N=1. diff --git a/mathext/internal/cephes/igam.go b/mathext/internal/cephes/igam.go index fcd2a183..4bc0bd1d 100644 --- a/mathext/internal/cephes/igam.go +++ b/mathext/internal/cephes/igam.go @@ -52,7 +52,9 @@ var igamCoefs = [igamDimK][igamDimN]float64{ } // Igam computes the incomplete Gamma integral. -// Igam(a,x) = (1/ Γ(a)) \int_0^x e^{-t} t^{a-1} dt +// +// Igam(a,x) = (1/ Γ(a)) \int_0^x e^{-t} t^{a-1} dt +// // The input argument a must be positive and x must be non-negative or Igam // will panic. func Igam(a, x float64) float64 { @@ -87,8 +89,10 @@ func Igam(a, x float64) float64 { } // IgamC computes the complemented incomplete Gamma integral. -// IgamC(a,x) = 1 - Igam(a,x) -// = (1/ Γ(a)) \int_0^\infty e^{-t} t^{a-1} dt +// +// IgamC(a,x) = 1 - Igam(a,x) +// = (1/ Γ(a)) \int_0^\infty e^{-t} t^{a-1} dt +// // The input argument a must be positive and x must be non-negative or // IgamC will panic. 
func IgamC(a, x float64) float64 { @@ -135,11 +139,16 @@ func IgamC(a, x float64) float64 { } // igamFac computes -// x^a * e^{-x} / Γ(a) +// +// x^a * e^{-x} / Γ(a) +// // corrected from (15) and (16) in [2] by replacing -// e^{x - a} +// +// e^{x - a} +// // with -// e^{a - x} +// +// e^{a - x} func igamFac(a, x float64) float64 { if math.Abs(a-x) > 0.4*math.Abs(a) { ax := a*math.Log(x) - x - lgam(a) diff --git a/mathext/internal/cephes/igami.go b/mathext/internal/cephes/igami.go index 697582e4..bb80b9cf 100644 --- a/mathext/internal/cephes/igami.go +++ b/mathext/internal/cephes/igami.go @@ -13,7 +13,9 @@ import "math" // IgamI computes the inverse of the incomplete Gamma function. That is, it // returns the x such that: -// IgamC(a, x) = p +// +// IgamC(a, x) = p +// // The input argument a must be positive and p must be between 0 and 1 // inclusive or IgamI will panic. IgamI should return a positive number, but // can return 0 even with non-zero y due to underflow. diff --git a/mathext/internal/cephes/polevl.go b/mathext/internal/cephes/polevl.go index 4e438b2a..aec399f3 100644 --- a/mathext/internal/cephes/polevl.go +++ b/mathext/internal/cephes/polevl.go @@ -12,7 +12,9 @@ package cephes import "math" // polevl evaluates a polynomial of degree N -// y = c_0 + c_1 x_1 + c_2 x_2^2 ... +// +// y = c_0 + c_1 x_1 + c_2 x_2^2 ... +// // where the coefficients are stored in reverse order, i.e. coef[0] = c_n and // coef[n] = c_0. func polevl(x float64, coef []float64, n int) float64 { diff --git a/mathext/internal/cephes/unity.go b/mathext/internal/cephes/unity.go index cb1695fa..3996e7e5 100644 --- a/mathext/internal/cephes/unity.go +++ b/mathext/internal/cephes/unity.go @@ -24,9 +24,13 @@ const ( ) // Coefficients for -// log(1+x) = x - \frac{x^2}{2} + \frac{x^3 lP(x)}{lQ(x)} +// +// log(1+x) = x - \frac{x^2}{2} + \frac{x^3 lP(x)}{lQ(x)} +// // for -// \frac{1}{\sqrt{2}} <= x < \sqrt{2} +// +// \frac{1}{\sqrt{2}} <= x < \sqrt{2} +// // Theoretical peak relative error = 2.32e-20 var lP = [...]float64{ 4.5270000862445199635215e-5, @@ -48,7 +52,8 @@ var lQ = [...]float64{ } // log1p computes -// log(1 + x) +// +// log(1 + x) func log1p(x float64) float64 { z := 1 + x if z < invSqrt2 || z > math.Sqrt2 { @@ -60,7 +65,8 @@ func log1p(x float64) float64 { } // log1pmx computes -// log(1 + x) - x +// +// log(1 + x) - x func log1pmx(x float64) float64 { if math.Abs(x) < 0.5 { xfac := x @@ -81,9 +87,12 @@ func log1pmx(x float64) float64 { } // Coefficients for -// e^x = 1 + \frac{2x eP(x^2)}{eQ(x^2) - eP(x^2)} +// +// e^x = 1 + \frac{2x eP(x^2)}{eQ(x^2) - eP(x^2)} +// // for -// -0.5 <= x <= 0.5 +// +// -0.5 <= x <= 0.5 var eP = [...]float64{ 1.2617719307481059087798e-4, 3.0299440770744196129956e-2, @@ -98,7 +107,8 @@ var eQ = [...]float64{ } // expm1 computes -// expm1(x) = e^x - 1 +// +// expm1(x) = e^x - 1 func expm1(x float64) float64 { if math.IsInf(x, 0) { if math.IsNaN(x) || x > 0 { @@ -126,7 +136,8 @@ var coscof = [...]float64{ } // cosm1 computes -// cosm1(x) = cos(x) - 1 +// +// cosm1(x) = cos(x) - 1 func cosm1(x float64) float64 { if x < -pi4 || x > pi4 { return math.Cos(x) - 1 @@ -137,8 +148,10 @@ func cosm1(x float64) float64 { } // lgam1pTayler computes -// lgam(x + 1) -//around x = 0 using its Taylor series. +// +// lgam(x + 1) +// +// around x = 0 using its Taylor series. 
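The cephes routines above live in an internal package, but the reverse-order Horner scheme described for polevl is easy to sketch on its own; evalPoly below is a standalone illustration, not the internal code.

	package main

	import "fmt"

	// evalPoly evaluates c_0 + c_1*x + ... + c_n*x^n with the coefficients
	// stored in reverse order, coef[0] = c_n and coef[len(coef)-1] = c_0,
	// mirroring the convention described for polevl.
	func evalPoly(x float64, coef []float64) float64 {
		y := coef[0]
		for _, c := range coef[1:] {
			y = y*x + c
		}
		return y
	}

	func main() {
		// 2x^2 + 3x + 4 stored as {c_2, c_1, c_0}.
		fmt.Println(evalPoly(2, []float64{2, 3, 4})) // prints 18
	}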
func lgam1pTaylor(x float64) float64 { if x == 0 { return 0 @@ -159,7 +172,8 @@ func lgam1pTaylor(x float64) float64 { } // lgam1p computes -// lgam(x + 1) +// +// lgam(x + 1) func lgam1p(x float64) float64 { if math.Abs(x) <= 0.5 { return lgam1pTaylor(x) diff --git a/mathext/internal/cephes/zeta.go b/mathext/internal/cephes/zeta.go index f87b552e..0efeaa60 100644 --- a/mathext/internal/cephes/zeta.go +++ b/mathext/internal/cephes/zeta.go @@ -13,9 +13,13 @@ import "math" // zetaCoegs are the expansion coefficients for Euler-Maclaurin summation // formula: -// \frac{(2k)!}{B_{2k}} +// +// \frac{(2k)!}{B_{2k}} +// // where -// B_{2k} +// +// B_{2k} +// // are Bernoulli numbers. var zetaCoefs = [...]float64{ 12.0, @@ -33,13 +37,16 @@ var zetaCoefs = [...]float64{ } // Zeta computes the Riemann zeta function of two arguments. -// Zeta(x,q) = \sum_{k=0}^{\infty} (k+q)^{-x} +// +// Zeta(x,q) = \sum_{k=0}^{\infty} (k+q)^{-x} +// // Note that Zeta returns +Inf if x is 1 and will panic if x is less than 1, // q is either zero or a negative integer, or q is negative and x is not an // integer. // // Note that: -// zeta(x,1) = zetac(x) + 1 +// +// zeta(x,1) = zetac(x) + 1 func Zeta(x, q float64) float64 { // REFERENCE: Gradshteyn, I. S., and I. M. Ryzhik, Tables of Integrals, Series, // and Products, p. 1073; Academic Press, 1980. diff --git a/mathext/internal/gonum/beta.go b/mathext/internal/gonum/beta.go index cec8acfe..f1fb3587 100644 --- a/mathext/internal/gonum/beta.go +++ b/mathext/internal/gonum/beta.go @@ -9,13 +9,16 @@ import ( ) // Beta returns the value of the complete beta function B(a, b). It is defined as -// Γ(a)Γ(b) / Γ(a+b) +// +// Γ(a)Γ(b) / Γ(a+b) +// // Special cases are: -// B(a,b) returns NaN if a or b is Inf -// B(a,b) returns NaN if a and b are 0 -// B(a,b) returns NaN if a or b is NaN -// B(a,b) returns NaN if a or b is < 0 -// B(a,b) returns +Inf if a xor b is 0. +// +// B(a,b) returns NaN if a or b is Inf +// B(a,b) returns NaN if a and b are 0 +// B(a,b) returns NaN if a or b is NaN +// B(a,b) returns NaN if a or b is < 0 +// B(a,b) returns +Inf if a xor b is 0. // // See http://mathworld.wolfram.com/BetaFunction.html for more detailed information. func Beta(a, b float64) float64 { @@ -24,13 +27,16 @@ func Beta(a, b float64) float64 { // Lbeta returns the natural logarithm of the complete beta function B(a,b). // Lbeta is defined as: -// Ln(Γ(a)Γ(b)/Γ(a+b)) +// +// Ln(Γ(a)Γ(b)/Γ(a+b)) +// // Special cases are: -// Lbeta(a,b) returns NaN if a or b is Inf -// Lbeta(a,b) returns NaN if a and b are 0 -// Lbeta(a,b) returns NaN if a or b is NaN -// Lbeta(a,b) returns NaN if a or b is < 0 -// Lbeta(a,b) returns +Inf if a xor b is 0. +// +// Lbeta(a,b) returns NaN if a or b is Inf +// Lbeta(a,b) returns NaN if a and b are 0 +// Lbeta(a,b) returns NaN if a or b is NaN +// Lbeta(a,b) returns NaN if a or b is < 0 +// Lbeta(a,b) returns +Inf if a xor b is 0. func Lbeta(a, b float64) float64 { switch { case math.IsInf(a, +1) || math.IsInf(b, +1): diff --git a/mathext/roots.go b/mathext/roots.go index 490ca8ac..120ce6ef 100644 --- a/mathext/roots.go +++ b/mathext/roots.go @@ -31,19 +31,22 @@ const ( // falsePosition uses a combination of bisection and false position to find a // root of a function within a given interval. This is guaranteed to converge, // and always keeps a bounding interval, unlike Newton's method. 
Inputs are: -// x1, x2: initial bounding interval -// f1, f2: value of f() at x1 and x2 -// absErr, relErr: absolute and relative errors on the bounding interval -// bisectTil: if > 0.0, perform bisection until the width of the bounding -// interval is less than this -// f, fExtra: function to find root of is f(x, fExtra) +// +// x1, x2: initial bounding interval +// f1, f2: value of f() at x1 and x2 +// absErr, relErr: absolute and relative errors on the bounding interval +// bisectTil: if > 0.0, perform bisection until the width of the bounding +// interval is less than this +// f, fExtra: function to find root of is f(x, fExtra) +// // Returns: -// result: whether an exact root was found, the process converged to a -// bounding interval small than the required error, or the max number -// of iterations was hit -// bestX: best root approximation -// bestF: function value at bestX -// errEst: error estimation +// +// result: whether an exact root was found, the process converged to a +// bounding interval small than the required error, or the max number +// of iterations was hit +// bestX: best root approximation +// bestF: function value at bestX +// errEst: error estimation func falsePosition(x1, x2, f1, f2, absErr, relErr, bisectTil float64, f objectiveFunc, fExtra []float64) (fSolveResult, float64, float64, float64) { // The false position steps are either unmodified, or modified with the // Anderson-Bjorck method as appropriate. Theoretically, this has a "speed of diff --git a/mathext/zeta.go b/mathext/zeta.go index 841b9b0f..23a87fae 100644 --- a/mathext/zeta.go +++ b/mathext/zeta.go @@ -7,7 +7,9 @@ package mathext import "gonum.org/v1/gonum/mathext/internal/cephes" // Zeta computes the Riemann zeta function of two arguments. -// Zeta(x,q) = \sum_{k=0}^{\infty} (k+q)^{-x} +// +// Zeta(x,q) = \sum_{k=0}^{\infty} (k+q)^{-x} +// // Note that Zeta returns +Inf if x is 1 and will panic if x is less than 1, // q is either zero or a negative integer, or q is negative and x is not an // integer. diff --git a/num/dual/dual.go b/num/dual/dual.go index 403e7b31..38c5cd55 100644 --- a/num/dual/dual.go +++ b/num/dual/dual.go @@ -97,6 +97,7 @@ func Mul(x, y Number) Number { // Inv returns the dual inverse of d. // // Special cases are: +// // Inv(±Inf) = ±0-0ϵ // Inv(±0) = ±Inf-Infϵ func Inv(d Number) Number { diff --git a/num/dual/dual_fike.go b/num/dual/dual_fike.go index 9d395f4a..b9a98c81 100644 --- a/num/dual/dual_fike.go +++ b/num/dual/dual_fike.go @@ -33,6 +33,7 @@ import "math" // PowReal returns x**p, the base-x exponential of p. // // Special cases are (in order): +// // PowReal(NaN+xϵ, ±0) = 1+NaNϵ for any x // PowReal(x, ±0) = 1 for any x // PowReal(1+xϵ, y) = 1+xyϵ for any y @@ -83,6 +84,7 @@ func Pow(d, p Number) Number { // Sqrt returns the square root of d. // // Special cases are: +// // Sqrt(+Inf) = +Inf // Sqrt(±0) = (±0+Infϵ) // Sqrt(x < 0) = NaN @@ -106,8 +108,10 @@ func Sqrt(d Number) Number { // Exp returns e**q, the base-e exponential of d. // // Special cases are: +// // Exp(+Inf) = +Inf // Exp(NaN) = NaN +// // Very large values overflow to 0 or +Inf. // Very small values underflow to 1. func Exp(d Number) Number { @@ -121,6 +125,7 @@ func Exp(d Number) Number { // Log returns the natural logarithm of d. // // Special cases are: +// // Log(+Inf) = (+Inf+0ϵ) // Log(0) = (-Inf±Infϵ) // Log(x < 0) = NaN @@ -153,6 +158,7 @@ func Log(d Number) Number { // Sin returns the sine of d. 
// // Special cases are: +// // Sin(±0) = (±0+Nϵ) // Sin(±Inf) = NaN // Sin(NaN) = NaN @@ -174,6 +180,7 @@ func Sin(d Number) Number { // Cos returns the cosine of d. // // Special cases are: +// // Cos(±Inf) = NaN // Cos(NaN) = NaN func Cos(d Number) Number { @@ -188,6 +195,7 @@ func Cos(d Number) Number { // Tan returns the tangent of d. // // Special cases are: +// // Tan(±0) = (±0+Nϵ) // Tan(±Inf) = NaN // Tan(NaN) = NaN @@ -209,6 +217,7 @@ func Tan(d Number) Number { // Asin returns the inverse sine of d. // // Special cases are: +// // Asin(±0) = (±0+Nϵ) // Asin(±1) = (±Inf+Infϵ) // Asin(x) = NaN if x < -1 or x > 1 @@ -241,6 +250,7 @@ func Asin(d Number) Number { // Acos returns the inverse cosine of d. // // Special cases are: +// // Acos(-1) = (Pi-Infϵ) // Acos(1) = (0-Infϵ) // Acos(x) = NaN if x < -1 or x > 1 @@ -268,6 +278,7 @@ func Acos(d Number) Number { // Atan returns the inverse tangent of d. // // Special cases are: +// // Atan(±0) = (±0+Nϵ) // Atan(±Inf) = (±Pi/2+0ϵ) func Atan(d Number) Number { diff --git a/num/dual/dual_hyperbolic.go b/num/dual/dual_hyperbolic.go index 829017f6..89215854 100644 --- a/num/dual/dual_hyperbolic.go +++ b/num/dual/dual_hyperbolic.go @@ -9,6 +9,7 @@ import "math" // Sinh returns the hyperbolic sine of d. // // Special cases are: +// // Sinh(±0) = (±0+Nϵ) // Sinh(±Inf) = ±Inf // Sinh(NaN) = NaN @@ -36,6 +37,7 @@ func Sinh(d Number) Number { // Cosh returns the hyperbolic cosine of d. // // Special cases are: +// // Cosh(±0) = 1 // Cosh(±Inf) = +Inf // Cosh(NaN) = NaN @@ -57,6 +59,7 @@ func Cosh(d Number) Number { // Tanh returns the hyperbolic tangent of d. // // Special cases are: +// // Tanh(±0) = (±0+Nϵ) // Tanh(±Inf) = (±1+0ϵ) // Tanh(NaN) = NaN @@ -89,6 +92,7 @@ func Tanh(d Number) Number { // Asinh returns the inverse hyperbolic sine of d. // // Special cases are: +// // Asinh(±0) = (±0+Nϵ) // Asinh(±Inf) = ±Inf // Asinh(NaN) = NaN @@ -110,6 +114,7 @@ func Asinh(d Number) Number { // Acosh returns the inverse hyperbolic cosine of d. // // Special cases are: +// // Acosh(+Inf) = +Inf // Acosh(1) = (0+Infϵ) // Acosh(x) = NaN if x < 1 @@ -138,6 +143,7 @@ func Acosh(d Number) Number { // Atanh returns the inverse hyperbolic tangent of d. // // Special cases are: +// // Atanh(1) = +Inf // Atanh(±0) = (±0+Nϵ) // Atanh(-1) = -Inf diff --git a/num/dualcmplx/dual.go b/num/dualcmplx/dual.go index 75b99fd1..a780c315 100644 --- a/num/dualcmplx/dual.go +++ b/num/dualcmplx/dual.go @@ -124,6 +124,7 @@ func Abs(d Number) float64 { // PowReal returns d**p, the base-d exponential of p. // // Special cases are (in order): +// // PowReal(NaN+xϵ, ±0) = 1+NaNϵ for any x // Pow(0+xϵ, y) = 0+Infϵ for all y < 1. // Pow(0+xϵ, y) = 0 for all y > 1. @@ -204,6 +205,7 @@ func Pow(d, p Number) Number { // Sqrt returns the square root of d. // // Special cases are: +// // Sqrt(+Inf) = +Inf // Sqrt(±0) = (±0+Infϵ) // Sqrt(x < 0) = NaN @@ -215,8 +217,10 @@ func Sqrt(d Number) Number { // Exp returns e**q, the base-e exponential of d. // // Special cases are: +// // Exp(+Inf) = +Inf // Exp(NaN) = NaN +// // Very large values overflow to 0 or +Inf. // Very small values underflow to 1. func Exp(d Number) Number { @@ -234,6 +238,7 @@ func Exp(d Number) Number { // Log returns the natural logarithm of d. 
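The dual-number functions above support forward-mode differentiation; the sketch below assumes the real and epsilon parts of dual.Number are the Real and Emag fields, as in gonum's num/dual package, and evaluates f(x) = exp(sin(x)) together with its derivative.

	package main

	import (
		"fmt"
		"math"

		"gonum.org/v1/gonum/num/dual"
	)

	func main() {
		x := 1.2

		// Seed the epsilon component with 1 so the Emag of the result is df/dx.
		d := dual.Number{Real: x, Emag: 1}

		// f(x) = exp(sin(x)) evaluated with dual arithmetic.
		y := dual.Exp(dual.Sin(d))

		fmt.Printf("f(x)  = %.10f\n", y.Real)
		fmt.Printf("f'(x) = %.10f\n", y.Emag)

		// Closed-form derivative for comparison: cos(x) * exp(sin(x)).
		fmt.Printf("check = %.10f\n", math.Cos(x)*math.Exp(math.Sin(x)))
	}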
// // Special cases are: +// // Log(+Inf) = (+Inf+0ϵ) // Log(0) = (-Inf±Infϵ) // Log(x < 0) = NaN diff --git a/num/dualquat/dual_fike.go b/num/dualquat/dual_fike.go index 1c8caa3d..6b5ddd20 100644 --- a/num/dualquat/dual_fike.go +++ b/num/dualquat/dual_fike.go @@ -37,6 +37,7 @@ import ( // PowReal returns d**p, the base-d exponential of p. // // Special cases are (in order): +// // PowReal(NaN+xϵ, ±0) = 1+NaNϵ for any x // PowReal(x, ±0) = 1 for any x // PowReal(1+xϵ, y) = 1+xyϵ for any y @@ -102,6 +103,7 @@ func Pow(d, p Number) Number { // Sqrt returns the square root of d // // Special cases are: +// // Sqrt(+Inf) = +Inf // Sqrt(±0) = (±0+Infϵ) // Sqrt(x < 0) = NaN @@ -113,8 +115,10 @@ func Sqrt(d Number) Number { // Exp returns e**d, the base-e exponential of d. // // Special cases are: +// // Exp(+Inf) = +Inf // Exp(NaN) = NaN +// // Very large values overflow to 0 or +Inf. // Very small values underflow to 1. func Exp(d Number) Number { @@ -128,6 +132,7 @@ func Exp(d Number) Number { // Log returns the natural logarithm of d. // // Special cases are: +// // Log(+Inf) = (+Inf+0ϵ) // Log(0) = (-Inf±Infϵ) // Log(x < 0) = NaN diff --git a/num/hyperdual/hyperdual.go b/num/hyperdual/hyperdual.go index 24bba723..ea4c4cd7 100644 --- a/num/hyperdual/hyperdual.go +++ b/num/hyperdual/hyperdual.go @@ -105,6 +105,7 @@ func Mul(x, y Number) Number { // Inv returns the hyperdual inverse of d. // // Special cases are: +// // Inv(±Inf) = ±0-0ϵ₁-0ϵ₂±0ϵ₁ϵ₂ // Inv(±0) = ±Inf-Infϵ₁-Infϵ₂±Infϵ₁ϵ₂ func Inv(d Number) Number { diff --git a/num/hyperdual/hyperdual_fike.go b/num/hyperdual/hyperdual_fike.go index bf991316..335cf8ab 100644 --- a/num/hyperdual/hyperdual_fike.go +++ b/num/hyperdual/hyperdual_fike.go @@ -33,6 +33,7 @@ import "math" // PowReal returns x**p, the base-x exponential of p. // // Special cases are (in order): +// // PowReal(NaN+xϵ₁+yϵ₂, ±0) = 1+NaNϵ₁+NaNϵ₂+NaNϵ₁ϵ₂ for any x and y // PowReal(x, ±0) = 1 for any x // PowReal(1+xϵ₁+yϵ₂, z) = 1+xzϵ₁+yzϵ₂+2xyzϵ₁ϵ₂ for any z @@ -86,6 +87,7 @@ func Pow(d, p Number) Number { // Sqrt returns the square root of d. // // Special cases are: +// // Sqrt(+Inf) = +Inf // Sqrt(±0) = (±0+Infϵ₁+Infϵ₂-Infϵ₁ϵ₂) // Sqrt(x < 0) = NaN @@ -113,8 +115,10 @@ func Sqrt(d Number) Number { // Exp returns e**q, the base-e exponential of d. // // Special cases are: +// // Exp(+Inf) = +Inf // Exp(NaN) = NaN +// // Very large values overflow to 0 or +Inf. // Very small values underflow to 1. func Exp(d Number) Number { @@ -130,6 +134,7 @@ func Exp(d Number) Number { // Log returns the natural logarithm of d. // // Special cases are: +// // Log(+Inf) = (+Inf+0ϵ₁+0ϵ₂-0ϵ₁ϵ₂) // Log(0) = (-Inf±Infϵ₁±Infϵ₂-Infϵ₁ϵ₂) // Log(x < 0) = NaN @@ -172,6 +177,7 @@ func Log(d Number) Number { // Sin returns the sine of d. // // Special cases are: +// // Sin(±0) = (±0+Nϵ₁+Nϵ₂∓0ϵ₁ϵ₂) // Sin(±Inf) = NaN // Sin(NaN) = NaN @@ -197,6 +203,7 @@ func Sin(d Number) Number { // Cos returns the cosine of d. // // Special cases are: +// // Cos(±Inf) = NaN // Cos(NaN) = NaN func Cos(d Number) Number { @@ -213,6 +220,7 @@ func Cos(d Number) Number { // Tan returns the tangent of d. // // Special cases are: +// // Tan(±0) = (±0+Nϵ₁+Nϵ₂±0ϵ₁ϵ₂) // Tan(±Inf) = NaN // Tan(NaN) = NaN @@ -238,6 +246,7 @@ func Tan(d Number) Number { // Asin returns the inverse sine of d. // // Special cases are: +// // Asin(±0) = (±0+Nϵ₁+Nϵ₂±0ϵ₁ϵ₂) // Asin(±1) = (±Inf+Infϵ₁+Infϵ₂±Infϵ₁ϵ₂) // Asin(x) = NaN if x < -1 or x > 1 @@ -279,6 +288,7 @@ func Asin(d Number) Number { // Acos returns the inverse cosine of d. 
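The hyperdual analogues documented above carry second derivatives as well; this sketch assumes the Real, E1mag, E2mag and E12mag field names from gonum's num/hyperdual package.

	package main

	import (
		"fmt"
		"math"

		"gonum.org/v1/gonum/num/hyperdual"
	)

	func main() {
		x := 0.7

		// Seeding both epsilon parts with 1 makes E1mag carry f'(x) and
		// E12mag carry f''(x).
		h := hyperdual.Number{Real: x, E1mag: 1, E2mag: 1}

		// f(x) = sin(x) evaluated with hyperdual arithmetic.
		y := hyperdual.Sin(h)

		fmt.Printf("f(x)   = %.10f (want %.10f)\n", y.Real, math.Sin(x))
		fmt.Printf("f'(x)  = %.10f (want %.10f)\n", y.E1mag, math.Cos(x))
		fmt.Printf("f''(x) = %.10f (want %.10f)\n", y.E12mag, -math.Sin(x))
	}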
// // Special cases are: +// // Acos(-1) = (Pi-Infϵ₁-Infϵ₂+Infϵ₁ϵ₂) // Acos(1) = (0-Infϵ₁-Infϵ₂-Infϵ₁ϵ₂) // Acos(x) = NaN if x < -1 or x > 1 @@ -313,6 +323,7 @@ func Acos(d Number) Number { // Atan returns the inverse tangent of d. // // Special cases are: +// // Atan(±0) = (±0+Nϵ₁+Nϵ₂∓0ϵ₁ϵ₂) // Atan(±Inf) = (±Pi/2+0ϵ₁+0ϵ₂∓0ϵ₁ϵ₂) func Atan(d Number) Number { diff --git a/num/hyperdual/hyperdual_hyperbolic.go b/num/hyperdual/hyperdual_hyperbolic.go index 5128789f..2340889c 100644 --- a/num/hyperdual/hyperdual_hyperbolic.go +++ b/num/hyperdual/hyperdual_hyperbolic.go @@ -9,6 +9,7 @@ import "math" // Sinh returns the hyperbolic sine of d. // // Special cases are: +// // Sinh(±0) = (±0+Nϵ₁+Nϵ₂±0ϵ₁ϵ₂) // Sinh(±Inf) = ±Inf // Sinh(NaN) = NaN @@ -42,6 +43,7 @@ func Sinh(d Number) Number { // Cosh returns the hyperbolic cosine of d. // // Special cases are: +// // Cosh(±0) = 1 // Cosh(±Inf) = +Inf // Cosh(NaN) = NaN @@ -67,6 +69,7 @@ func Cosh(d Number) Number { // Tanh returns the hyperbolic tangent of d. // // Special cases are: +// // Tanh(±0) = (±0+Nϵ₁+Nϵ₂∓0ϵ₁ϵ₂) // Tanh(±Inf) = (±1+0ϵ₁+0ϵ₂∓0ϵ₁ϵ₂) // Tanh(NaN) = NaN @@ -107,6 +110,7 @@ func Tanh(d Number) Number { // Asinh returns the inverse hyperbolic sine of d. // // Special cases are: +// // Asinh(±0) = (±0+Nϵ₁+Nϵ₂∓0ϵ₁ϵ₂) // Asinh(±Inf) = ±Inf // Asinh(NaN) = NaN @@ -133,6 +137,7 @@ func Asinh(d Number) Number { // Acosh returns the inverse hyperbolic cosine of d. // // Special cases are: +// // Acosh(+Inf) = +Inf // Acosh(1) = (0+Infϵ₁+Infϵ₂-Infϵ₁ϵ₂) // Acosh(x) = NaN if x < 1 @@ -168,6 +173,7 @@ func Acosh(d Number) Number { // Atanh returns the inverse hyperbolic tangent of d. // // Special cases are: +// // Atanh(1) = +Inf // Atanh(±0) = (±0+Nϵ₁+Nϵ₂±0ϵ₁ϵ₂) // Atanh(-1) = -Inf diff --git a/num/quat/exp.go b/num/quat/exp.go index b42769ca..90fa041a 100644 --- a/num/quat/exp.go +++ b/num/quat/exp.go @@ -34,9 +34,10 @@ func Log(q Number) Number { // Pow return q**r, the base-q exponential of r. // For generalized compatibility with math.Pow: -// Pow(0, ±0) returns 1+0i+0j+0k -// Pow(0, c) for real(c)<0 returns Inf+0i+0j+0k if imag(c), jmag(c), kmag(c) are zero, -// otherwise Inf+Inf i+Inf j+Inf k. +// +// Pow(0, ±0) returns 1+0i+0j+0k +// Pow(0, c) for real(c)<0 returns Inf+0i+0j+0k if imag(c), jmag(c), kmag(c) are zero, +// otherwise Inf+Inf i+Inf j+Inf k. func Pow(q, r Number) Number { if q == zero { w, uv := split(r) @@ -57,8 +58,9 @@ func Pow(q, r Number) Number { // PowReal return q**r, the base-q exponential of r. // For generalized compatibility with math.Pow: -// PowReal(0, ±0) returns 1+0i+0j+0k -// PowReal(0, c) for c<0 returns Inf+0i+0j+0k. +// +// PowReal(0, ±0) returns 1+0i+0j+0k +// PowReal(0, c) for c<0 returns Inf+0i+0j+0k. func PowReal(q Number, r float64) Number { if q == zero { switch { diff --git a/num/quat/nan.go b/num/quat/nan.go index 02afe48a..b543012d 100644 --- a/num/quat/nan.go +++ b/num/quat/nan.go @@ -19,7 +19,7 @@ func IsNaN(q Number) bool { return math.IsNaN(q.Real) || math.IsNaN(q.Imag) || math.IsNaN(q.Jmag) || math.IsNaN(q.Kmag) } -// NaN returns a quaternion ``not-a-number'' value. +// NaN returns a quaternion “not-a-number” value. func NaN() Number { nan := math.NaN() return Number{Real: nan, Imag: nan, Jmag: nan, Kmag: nan} diff --git a/optimize/cg.go b/optimize/cg.go index 9bf88c58..6474b037 100644 --- a/optimize/cg.go +++ b/optimize/cg.go @@ -43,7 +43,9 @@ var ( // CG implements the nonlinear conjugate gradient method for solving nonlinear // unconstrained optimization problems. 
It is a line search method that // generates the search directions d_k according to the formula -// d_{k+1} = -∇f_{k+1} + β_k*d_k, d_0 = -∇f_0. +// +// d_{k+1} = -∇f_{k+1} + β_k*d_k, d_0 = -∇f_0. +// // Variants of the conjugate gradient method differ in the choice of the // parameter β_k. The conjugate gradient method usually requires fewer function // evaluations than the gradient descent method and no matrix storage, but @@ -52,13 +54,13 @@ var ( // CG implements a restart strategy that takes the steepest descent direction // (i.e., d_{k+1} = -∇f_{k+1}) whenever any of the following conditions holds: // -// - A certain number of iterations has elapsed without a restart. This number -// is controllable via IterationRestartFactor and if equal to 0, it is set to -// a reasonable default based on the problem dimension. -// - The angle between the gradients at two consecutive iterations ∇f_k and -// ∇f_{k+1} is too large. -// - The direction d_{k+1} is not a descent direction. -// - β_k returned from CGVariant.Beta is equal to zero. +// - A certain number of iterations has elapsed without a restart. This number +// is controllable via IterationRestartFactor and if equal to 0, it is set to +// a reasonable default based on the problem dimension. +// - The angle between the gradients at two consecutive iterations ∇f_k and +// ∇f_{k+1} is too large. +// - The direction d_{k+1} is not a descent direction. +// - β_k returned from CGVariant.Beta is equal to zero. // // The line search for CG must yield step sizes that satisfy the strong Wolfe // conditions at every iteration, otherwise the generated search direction @@ -263,7 +265,8 @@ func (*CG) needs() struct { // FletcherReeves implements the Fletcher-Reeves variant of the CG method that // computes the scaling parameter β_k according to the formula -// β_k = |∇f_{k+1}|^2 / |∇f_k|^2. +// +// β_k = |∇f_{k+1}|^2 / |∇f_k|^2. type FletcherReeves struct { prevNorm float64 } @@ -281,7 +284,9 @@ func (fr *FletcherReeves) Beta(grad, _, _ []float64) (beta float64) { // PolakRibierePolyak implements the Polak-Ribiere-Polyak variant of the CG // method that computes the scaling parameter β_k according to the formula -// β_k = max(0, ∇f_{k+1}·y_k / |∇f_k|^2), +// +// β_k = max(0, ∇f_{k+1}·y_k / |∇f_k|^2), +// // where y_k = ∇f_{k+1} - ∇f_k. type PolakRibierePolyak struct { prevNorm float64 @@ -301,7 +306,9 @@ func (pr *PolakRibierePolyak) Beta(grad, gradPrev, _ []float64) (beta float64) { // HestenesStiefel implements the Hestenes-Stiefel variant of the CG method // that computes the scaling parameter β_k according to the formula -// β_k = max(0, ∇f_{k+1}·y_k / d_k·y_k), +// +// β_k = max(0, ∇f_{k+1}·y_k / d_k·y_k), +// // where y_k = ∇f_{k+1} - ∇f_k. type HestenesStiefel struct { y []float64 @@ -319,7 +326,9 @@ func (hs *HestenesStiefel) Beta(grad, gradPrev, dirPrev []float64) (beta float64 // DaiYuan implements the Dai-Yuan variant of the CG method that computes the // scaling parameter β_k according to the formula -// β_k = |∇f_{k+1}|^2 / d_k·y_k, +// +// β_k = |∇f_{k+1}|^2 / d_k·y_k, +// // where y_k = ∇f_{k+1} - ∇f_k. type DaiYuan struct { y []float64 @@ -337,7 +346,9 @@ func (dy *DaiYuan) Beta(grad, gradPrev, dirPrev []float64) (beta float64) { // HagerZhang implements the Hager-Zhang variant of the CG method that computes the // scaling parameter β_k according to the formula -// β_k = (y_k - 2 d_k |y_k|^2/(d_k·y_k))·∇f_{k+1} / (d_k·y_k), +// +// β_k = (y_k - 2 d_k |y_k|^2/(d_k·y_k))·∇f_{k+1} / (d_k·y_k), +// // where y_k = ∇f_{k+1} - ∇f_k. 
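A sketch of how one of the β_k variants above might be selected in practice; the quadratic objective, gradient and starting point are invented for the example.

	package main

	import (
		"fmt"
		"log"

		"gonum.org/v1/gonum/optimize"
	)

	func main() {
		// A simple smooth objective: f(x) = (x0-1)^2 + 4*(x1+2)^2.
		problem := optimize.Problem{
			Func: func(x []float64) float64 {
				return (x[0]-1)*(x[0]-1) + 4*(x[1]+2)*(x[1]+2)
			},
			Grad: func(grad, x []float64) {
				grad[0] = 2 * (x[0] - 1)
				grad[1] = 8 * (x[1] + 2)
			},
		}

		// Use the Polak-Ribiere-Polyak formula for β_k.
		method := &optimize.CG{Variant: &optimize.PolakRibierePolyak{}}

		result, err := optimize.Minimize(problem, []float64{5, 5}, nil, method)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("minimum %.6f at x = %v\n", result.F, result.X)
	}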
type HagerZhang struct { y []float64 diff --git a/optimize/cmaes.go b/optimize/cmaes.go index 9c51211d..70ead5a8 100644 --- a/optimize/cmaes.go +++ b/optimize/cmaes.go @@ -23,10 +23,12 @@ var _ Method = (*CmaEsChol)(nil) // CmaEsChol implements the covariance matrix adaptation evolution strategy (CMA-ES) // based on the Cholesky decomposition. The full algorithm is described in -// Krause, Oswin, Dídac Rodríguez Arbonès, and Christian Igel. "CMA-ES with -// optimal covariance update and storage complexity." Advances in Neural -// Information Processing Systems. 2016. -// https://papers.nips.cc/paper/6457-cma-es-with-optimal-covariance-update-and-storage-complexity.pdf +// +// Krause, Oswin, Dídac Rodríguez Arbonès, and Christian Igel. "CMA-ES with +// optimal covariance update and storage complexity." Advances in Neural +// Information Processing Systems. 2016. +// https://papers.nips.cc/paper/6457-cma-es-with-optimal-covariance-update-and-storage-complexity.pdf +// // CMA-ES is a global optimization method that progressively adapts a population // of samples. CMA-ES combines techniques from local optimization with global // optimization. Specifically, the CMA-ES algorithm uses an initial multivariate @@ -50,8 +52,9 @@ var _ Method = (*CmaEsChol)(nil) // CMA-ES algorithm, but the covariance update equation is not identical. // // For more information about the CMA-ES algorithm, see -// https://en.wikipedia.org/wiki/CMA-ES -// https://arxiv.org/pdf/1604.00772.pdf +// +// https://en.wikipedia.org/wiki/CMA-ES +// https://arxiv.org/pdf/1604.00772.pdf type CmaEsChol struct { // InitStepSize sets the initial size of the covariance matrix adaptation. // If InitStepSize is 0, a default value of 0.5 is used. InitStepSize cannot diff --git a/optimize/convex/lp/convert.go b/optimize/convex/lp/convert.go index 2db3adad..539ef205 100644 --- a/optimize/convex/lp/convert.go +++ b/optimize/convex/lp/convert.go @@ -19,13 +19,17 @@ import ( // Convert converts a General-form LP into a standard form LP. // The general form of an LP is: -// minimize cᵀ * x -// s.t G * x <= h -// A * x = b +// +// minimize cᵀ * x +// s.t G * x <= h +// A * x = b +// // And the standard form is: -// minimize cNewᵀ * x -// s.t aNew * x = bNew -// x >= 0 +// +// minimize cNewᵀ * x +// s.t aNew * x = bNew +// x >= 0 +// // If there are no constraints of the given type, the inputs may be nil. func Convert(c []float64, g mat.Matrix, h []float64, a mat.Matrix, b []float64) (cNew []float64, aNew *mat.Dense, bNew []float64) { nVar := len(c) diff --git a/optimize/convex/lp/simplex.go b/optimize/convex/lp/simplex.go index 8b396149..930fd6b8 100644 --- a/optimize/convex/lp/simplex.go +++ b/optimize/convex/lp/simplex.go @@ -57,9 +57,11 @@ const ( // Simplex solves a linear program in standard form using Danzig's Simplex // algorithm. The standard form of a linear program is: -// minimize cᵀ x -// s.t. A*x = b -// x >= 0 . +// +// minimize cᵀ x +// s.t. A*x = b +// x >= 0 . +// // The input tol sets how close to the optimal solution is found (specifically, // when the maximal reduced cost is below tol). An error will be returned if the // problem is infeasible or unbounded. In rare cases, numeric errors can cause @@ -80,9 +82,12 @@ const ( // Simplex will panic. // // A description of the Simplex algorithm can be found in Ch. 8 of -// Strang, Gilbert. "Linear Algebra and Applications." Academic, New York (1976). +// +// Strang, Gilbert. "Linear Algebra and Applications." Academic, New York (1976). 
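A sketch of solving a tiny standard-form LP with the routine documented above; the data are made up, and the underlying problem (maximize x+2y subject to x+y <= 4, x <= 2) has already been rewritten in equality form with slack variables.

	package main

	import (
		"fmt"
		"log"

		"gonum.org/v1/gonum/mat"
		"gonum.org/v1/gonum/optimize/convex/lp"
	)

	func main() {
		// Standard form: minimize cᵀx subject to A*x = b, x >= 0.
		// Variables are [x, y, s1, s2], with s1 and s2 slack variables.
		c := []float64{-1, -2, 0, 0}
		A := mat.NewDense(2, 4, []float64{
			1, 1, 1, 0, // x + y + s1 = 4
			1, 0, 0, 1, // x     + s2 = 2
		})
		b := []float64{4, 2}

		optF, optX, err := lp.Simplex(c, A, b, 1e-10, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("objective %.4f at x = %v\n", optF, optX)
	}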
+// // For a detailed video introduction, see lectures 11-13 of UC Math 352 -// https://www.youtube.com/watch?v=ESzYPFkY3og&index=11&list=PLh464gFUoJWOmBYla3zbZbc4nv2AXez6X. +// +// https://www.youtube.com/watch?v=ESzYPFkY3og&index=11&list=PLh464gFUoJWOmBYla3zbZbc4nv2AXez6X. func Simplex(c []float64, A mat.Matrix, b []float64, tol float64, initialBasic []int) (optF float64, optX []float64, err error) { ans, x, _, err := simplex(initialBasic, c, A, b, tol) return ans, x, err diff --git a/optimize/functionconvergence.go b/optimize/functionconvergence.go index 7f987cfc..d5b12c30 100644 --- a/optimize/functionconvergence.go +++ b/optimize/functionconvergence.go @@ -34,9 +34,13 @@ func (NeverTerminate) Converged(loc *Location) Status { // over the last iterations. A FunctionConvergence status is returned if // there is no significant decrease for FunctionConverge.Iterations. A // significant decrease is considered if -// f < f_best +// +// f < f_best +// // and -// f_best - f > FunctionConverge.Relative * maxabs(f, f_best) + FunctionConverge.Absolute +// +// f_best - f > FunctionConverge.Relative * maxabs(f, f_best) + FunctionConverge.Absolute +// // If the decrease is significant, then the iteration counter is reset and // f_best is updated. // diff --git a/optimize/functions/functions.go b/optimize/functions/functions.go index 9a32c74a..233d71dc 100644 --- a/optimize/functions/functions.go +++ b/optimize/functions/functions.go @@ -14,15 +14,16 @@ import ( // Beale implements the Beale's function. // // Standard starting points: -// Easy: [1, 1] -// Hard: [1, 4] +// +// Easy: [1, 1] +// Hard: [1, 4] // // References: -// - Beale, E.: On an Iterative Method for Finding a Local Minimum of a -// Function of More than One Variable. Technical Report 25, Statistical -// Techniques Research Group, Princeton University (1958) -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Beale, E.: On an Iterative Method for Finding a Local Minimum of a +// Function of More than One Variable. Technical Report 25, Statistical +// Techniques Research Group, Princeton University (1958) +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type Beale struct{} func (Beale) Func(x []float64) float64 { @@ -92,11 +93,13 @@ func (Beale) Minima() []Minimum { // BiggsEXP2 implements the Biggs' EXP2 function. // // Standard starting point: -// [1, 2] +// +// [1, 2] // // Reference: -// Biggs, M.C.: Minimization algorithms making use of non-quadratic properties -// of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315 +// +// Biggs, M.C.: Minimization algorithms making use of non-quadratic properties +// of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315 type BiggsEXP2 struct{} func (BiggsEXP2) Func(x []float64) (sum float64) { @@ -150,11 +153,13 @@ func (BiggsEXP2) Minima() []Minimum { // BiggsEXP3 implements the Biggs' EXP3 function. // // Standard starting point: -// [1, 2, 1] +// +// [1, 2, 1] // // Reference: -// Biggs, M.C.: Minimization algorithms making use of non-quadratic properties -// of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315 +// +// Biggs, M.C.: Minimization algorithms making use of non-quadratic properties +// of the objective function. 
IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315 type BiggsEXP3 struct{} func (BiggsEXP3) Func(x []float64) (sum float64) { @@ -210,11 +215,13 @@ func (BiggsEXP3) Minima() []Minimum { // BiggsEXP4 implements the Biggs' EXP4 function. // // Standard starting point: -// [1, 2, 1, 1] +// +// [1, 2, 1, 1] // // Reference: -// Biggs, M.C.: Minimization algorithms making use of non-quadratic properties -// of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315 +// +// Biggs, M.C.: Minimization algorithms making use of non-quadratic properties +// of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315 type BiggsEXP4 struct{} func (BiggsEXP4) Func(x []float64) (sum float64) { @@ -272,11 +279,13 @@ func (BiggsEXP4) Minima() []Minimum { // BiggsEXP5 implements the Biggs' EXP5 function. // // Standard starting point: -// [1, 2, 1, 1, 1] +// +// [1, 2, 1, 1, 1] // // Reference: -// Biggs, M.C.: Minimization algorithms making use of non-quadratic properties -// of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315 +// +// Biggs, M.C.: Minimization algorithms making use of non-quadratic properties +// of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315 type BiggsEXP5 struct{} func (BiggsEXP5) Func(x []float64) (sum float64) { @@ -336,14 +345,15 @@ func (BiggsEXP5) Minima() []Minimum { // BiggsEXP6 implements the Biggs' EXP6 function. // // Standard starting point: -// [1, 2, 1, 1, 1, 1] +// +// [1, 2, 1, 1, 1, 1] // // References: -// - Biggs, M.C.: Minimization algorithms making use of non-quadratic -// properties of the objective function. IMA J Appl Math 8 (1971), 315-327; -// doi:10.1093/imamat/8.3.315 -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Biggs, M.C.: Minimization algorithms making use of non-quadratic +// properties of the objective function. IMA J Appl Math 8 (1971), 315-327; +// doi:10.1093/imamat/8.3.315 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type BiggsEXP6 struct{} func (BiggsEXP6) Func(x []float64) (sum float64) { @@ -417,13 +427,14 @@ func (BiggsEXP6) Minima() []Minimum { // Box3D implements the Box' three-dimensional function. // // Standard starting point: -// [0, 10, 20] +// +// [0, 10, 20] // // References: -// - Box, M.J.: A comparison of several current optimization methods, and the -// use of transformations in constrained problems. Comput J 9 (1966), 67-77 -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Box, M.J.: A comparison of several current optimization methods, and the +// use of transformations in constrained problems. Comput J 9 (1966), 67-77 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type Box3D struct{} func (Box3D) Func(x []float64) (sum float64) { @@ -485,11 +496,14 @@ func (Box3D) Minima() []Minimum { // BraninHoo implements the Branin-Hoo function. BraninHoo is a 2-dimensional // test function with three global minima. It is typically evaluated in the domain // x_0 ∈ [-5, 10], x_1 ∈ [0, 15]. 
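
The lp.Convert and lp.Simplex comments reformatted a few hunks above spell out the standard form (minimize cᵀ x subject to A*x = b, x >= 0) together with the full Simplex signature. A minimal sketch of calling lp.Simplex on a tiny, made-up standard-form instance; the concrete numbers are illustrative only and are not taken from the source:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/mat"
		"gonum.org/v1/gonum/optimize/convex/lp"
	)

	func main() {
		// Hypothetical standard-form LP:
		// minimize -x0 - 2*x1  subject to  x0 + x1 + s = 4,  x >= 0.
		c := []float64{-1, -2, 0}
		A := mat.NewDense(1, 3, []float64{1, 1, 1})
		b := []float64{4}

		optF, optX, err := lp.Simplex(c, A, b, 1e-10, nil)
		if err != nil {
			fmt.Println("simplex failed:", err)
			return
		}
		// The optimum of this instance is x = (0, 4, 0) with f = -8.
		fmt.Printf("f* = %v at x = %v\n", optF, optX)
	}
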
-// f(x) = (x_1 - (5.1/(4π^2))*x_0^2 + (5/π)*x_0 - 6)^2 + 10*(1-1/(8π))cos(x_0) + 10 +// +// f(x) = (x_1 - (5.1/(4π^2))*x_0^2 + (5/π)*x_0 - 6)^2 + 10*(1-1/(8π))cos(x_0) + 10 +// // It has a minimum value of 0.397887 at x^* = {(-π, 12.275), (π, 2.275), (9.424778, 2.475)} // // Reference: -// https://www.sfu.ca/~ssurjano/branin.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/branin.html (obtained June 2017) type BraninHoo struct{} func (BraninHoo) Func(x []float64) float64 { @@ -525,11 +539,12 @@ func (BraninHoo) Minima() []Minimum { // BrownBadlyScaled implements the Brown's badly scaled function. // // Standard starting point: -// [1, 1] +// +// [1, 1] // // References: -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type BrownBadlyScaled struct{} func (BrownBadlyScaled) Func(x []float64) float64 { @@ -587,14 +602,15 @@ func (BrownBadlyScaled) Minima() []Minimum { // BrownAndDennis implements the Brown and Dennis function. // // Standard starting point: -// [25, 5, -5, -1] +// +// [25, 5, -5, -1] // // References: -// - Brown, K.M., Dennis, J.E.: New computational algorithms for minimizing a -// sum of squares of nonlinear functions. Research Report Number 71-6, Yale -// University (1971) -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Brown, K.M., Dennis, J.E.: New computational algorithms for minimizing a +// sum of squares of nonlinear functions. Research Report Number 71-6, Yale +// University (1971) +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type BrownAndDennis struct{} func (BrownAndDennis) Func(x []float64) (sum float64) { @@ -689,14 +705,15 @@ func (BrownAndDennis) Minima() []Minimum { // Its Hessian matrix is singular at the minimizer. // // Standard starting point: -// [3, -1, 0, 3, 3, -1, 0, 3, ..., 3, -1, 0, 3] +// +// [3, -1, 0, 3, 3, -1, 0, 3, ..., 3, -1, 0, 3] // // References: -// - Spedicato E.: Computational experience with quasi-Newton algorithms for -// minimization problems of moderatly large size. Towards Global -// Optimization 2 (1978), 209-219 -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Spedicato E.: Computational experience with quasi-Newton algorithms for +// minimization problems of moderatly large size. Towards Global +// Optimization 2 (1978), 209-219 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type ExtendedPowellSingular struct{} func (ExtendedPowellSingular) Func(x []float64) (sum float64) { @@ -762,13 +779,14 @@ func (ExtendedPowellSingular) Minima() []Minimum { // function. // // Standard starting point: -// Easy: [-1.2, 1, -1.2, 1, ...] -// Hard: any point far from the minimum +// +// Easy: [-1.2, 1, -1.2, 1, ...] +// Hard: any point far from the minimum // // References: -// - Rosenbrock, H.H.: An Automatic Method for Finding the Greatest or Least -// Value of a Function. Computer J 3 (1960), 175-184 -// - http://en.wikipedia.org/wiki/Rosenbrock_function +// - Rosenbrock, H.H.: An Automatic Method for Finding the Greatest or Least +// Value of a Function. 
Computer J 3 (1960), 175-184 +// - http://en.wikipedia.org/wiki/Rosenbrock_function type ExtendedRosenbrock struct{} func (ExtendedRosenbrock) Func(x []float64) (sum float64) { @@ -858,11 +876,13 @@ func (ExtendedRosenbrock) Minima() []Minimum { // caused by the finite floating point precision. // // Standard starting point: -// [0.4, 1, 0] +// +// [0.4, 1, 0] // // Reference: -// More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained optimization -// software. ACM Trans Math Softw 7 (1981), 17-41 +// +// More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained optimization +// software. ACM Trans Math Softw 7 (1981), 17-41 type Gaussian struct{} func (Gaussian) y(i int) (yi float64) { @@ -939,14 +959,15 @@ func (Gaussian) Minima() []Minimum { // GulfResearchAndDevelopment implements the Gulf Research and Development function. // // Standard starting point: -// [5, 2.5, 0.15] +// +// [5, 2.5, 0.15] // // References: -// - Cox, R.A.: Comparison of the performance of seven optimization algorithms -// on twelve unconstrained minimization problems. Ref. 1335CNO4, Gulf -// Research and Development Company, Pittsburg (1969) -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Cox, R.A.: Comparison of the performance of seven optimization algorithms +// on twelve unconstrained minimization problems. Ref. 1335CNO4, Gulf +// Research and Development Company, Pittsburg (1969) +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type GulfResearchAndDevelopment struct{} func (GulfResearchAndDevelopment) Func(x []float64) (sum float64) { @@ -1016,13 +1037,14 @@ func (GulfResearchAndDevelopment) Minima() []Minimum { // Function is not defined at x[0] = 0. // // Standard starting point: -// [-1, 0, 0] +// +// [-1, 0, 0] // // References: -// - Fletcher, R., Powell, M.J.D.: A rapidly convergent descent method for -// minimization. Comput J 6 (1963), 163-168 -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Fletcher, R., Powell, M.J.D.: A rapidly convergent descent method for +// minimization. Comput J 6 (1963), 163-168 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type HelicalValley struct{} func (HelicalValley) Func(x []float64) float64 { @@ -1098,14 +1120,15 @@ func (Linear) Grad(grad, x []float64) []float64 { // PenaltyI implements the first penalty function by Gill, Murray and Pitfield. // // Standard starting point: -// [1, ..., n] +// +// [1, ..., n] // // References: -// - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised -// quasi-Newton algorithms for unconstrained optimization. Report NAC 11, -// National Phys Lab (1972), 82-83 -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised +// quasi-Newton algorithms for unconstrained optimization. Report NAC 11, +// National Phys Lab (1972), 82-83 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. 
ACM Trans Math Softw 7 (1981), 17-41 type PenaltyI struct{} func (PenaltyI) Func(x []float64) (sum float64) { @@ -1156,14 +1179,15 @@ func (PenaltyI) Minima() []Minimum { // PenaltyII implements the second penalty function by Gill, Murray and Pitfield. // // Standard starting point: -// [0.5, ..., 0.5] +// +// [0.5, ..., 0.5] // // References: -// - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised -// quasi-Newton algorithms for unconstrained optimization. Report NAC 11, -// National Phys Lab (1972), 82-83 -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised +// quasi-Newton algorithms for unconstrained optimization. Report NAC 11, +// National Phys Lab (1972), 82-83 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type PenaltyII struct{} func (PenaltyII) Func(x []float64) (sum float64) { @@ -1236,14 +1260,15 @@ func (PenaltyII) Minima() []Minimum { // that gives f(x) ≅ 1e-13. // // Standard starting point: -// [0, 1] +// +// [0, 1] // // References: -// - Powell, M.J.D.: A Hybrid Method for Nonlinear Equations. Numerical -// Methods for Nonlinear Algebraic Equations, P. Rabinowitz (ed.), Gordon -// and Breach (1970) -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Powell, M.J.D.: A Hybrid Method for Nonlinear Equations. Numerical +// Methods for Nonlinear Algebraic Equations, P. Rabinowitz (ed.), Gordon +// and Breach (1970) +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type PowellBadlyScaled struct{} func (PowellBadlyScaled) Func(x []float64) float64 { @@ -1304,14 +1329,15 @@ func (PowellBadlyScaled) Minima() []Minimum { // Trigonometric implements the trigonometric function. // // Standard starting point: -// [1/dim, ..., 1/dim] +// +// [1/dim, ..., 1/dim] // // References: -// - Spedicato E.: Computational experience with quasi-Newton algorithms for -// minimization problems of moderatly large size. Towards Global -// Optimization 2 (1978), 209-219 -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Spedicato E.: Computational experience with quasi-Newton algorithms for +// minimization problems of moderatly large size. Towards Global +// Optimization 2 (1978), 209-219 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type Trigonometric struct{} func (Trigonometric) Func(x []float64) (sum float64) { @@ -1374,11 +1400,13 @@ func (Trigonometric) Minima() []Minimum { // VariablyDimensioned implements a variably dimensioned function. // // Standard starting point: -// [..., (dim-i)/dim, ...], i=1,...,dim +// +// [..., (dim-i)/dim, ...], i=1,...,dim // // References: -// More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained optimization -// software. ACM Trans Math Softw 7 (1981), 17-41 +// +// More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained optimization +// software. 
ACM Trans Math Softw 7 (1981), 17-41 type VariablyDimensioned struct{} func (VariablyDimensioned) Func(x []float64) (sum float64) { @@ -1447,13 +1475,14 @@ func (VariablyDimensioned) Minima() []Minimum { // of minimizing the function is very ill conditioned. // // Standard starting point: -// [0, ..., 0] +// +// [0, ..., 0] // // References: -// - Kowalik, J.S., Osborne, M.R.: Methods for Unconstrained Optimization -// Problems. Elsevier North-Holland, New York, 1968 -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Kowalik, J.S., Osborne, M.R.: Methods for Unconstrained Optimization +// Problems. Elsevier North-Holland, New York, 1968 +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type Watson struct{} func (Watson) Func(x []float64) (sum float64) { @@ -1597,13 +1626,14 @@ func (Watson) Minima() []Minimum { // Wood implements the Wood's function. // // Standard starting point: -// [-3, -1, -3, -1] +// +// [-3, -1, -3, -1] // // References: -// - Colville, A.R.: A comparative study of nonlinear programming codes. -// Report 320-2949, IBM New York Scientific Center (1968) -// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained -// optimization software. ACM Trans Math Softw 7 (1981), 17-41 +// - Colville, A.R.: A comparative study of nonlinear programming codes. +// Report 320-2949, IBM New York Scientific Center (1968) +// - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained +// optimization software. ACM Trans Math Softw 7 (1981), 17-41 type Wood struct{} func (Wood) Func(x []float64) (sum float64) { @@ -1674,8 +1704,9 @@ func (Wood) Minima() []Minimum { // of the minimizer which is located at x=sqrt(2). // // References: -// More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease. -// ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.1) +// +// More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease. +// ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.1) type ConcaveRight struct{} func (ConcaveRight) Func(x []float64) float64 { @@ -1700,8 +1731,9 @@ func (ConcaveRight) Grad(grad, x []float64) { // the minimizer which is located at x=399/250=1.596. // // References: -// More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease. -// ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.2) +// +// More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease. +// ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.2) type ConcaveLeft struct{} func (ConcaveLeft) Func(x []float64) float64 { @@ -1729,8 +1761,9 @@ func (ConcaveLeft) Grad(grad, x []float64) { // on the derivative is unreliable due to the oscillations. // // References: -// More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease. -// ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.3) +// +// More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease. +// ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.3) type Plassmann struct { L float64 // Number of oscillations for |x-1| ≥ Beta. Beta float64 // Size of the derivative at zero, f'(0) = -Beta. 
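
The unconstrained test functions whose comments are reformatted above (Beale, the Biggs family, ExtendedRosenbrock, Watson and the rest) are normally exercised through optimize.Minimize. A short sketch minimizing functions.Beale from its documented easy starting point [1, 1] with Nelder-Mead; the optimize.Problem and Minimize wiring is assumed from the same module rather than shown in this diff, so treat it as illustrative:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/optimize"
		"gonum.org/v1/gonum/optimize/functions"
	)

	func main() {
		f := functions.Beale{}
		p := optimize.Problem{
			Func: f.Func, // Nelder-Mead is gradient free, so only Func is needed
		}
		x0 := []float64{1, 1} // the documented easy starting point

		result, err := optimize.Minimize(p, x0, nil, &optimize.NelderMead{})
		if err != nil {
			fmt.Println("minimization failed:", err)
			return
		}
		fmt.Printf("x = %v, f = %v\n", result.X, result.F)
	}
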
@@ -1783,10 +1816,10 @@ func (f Plassmann) Grad(grad, x []float64) { // the parameter values. // // References: -// - More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease. -// ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.4) -// - Yanai, H., Ozawa, M., and Kaneko, S.: Interpolation methods in one dimensional -// optimization. Computing 27 (1981), 155–163 +// - More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease. +// ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.4) +// - Yanai, H., Ozawa, M., and Kaneko, S.: Interpolation methods in one dimensional +// optimization. Computing 27 (1981), 155–163 type YanaiOzawaKaneko struct { Beta1 float64 Beta2 float64 diff --git a/optimize/functions/minsurf.go b/optimize/functions/minsurf.go index 140506f3..510966b7 100644 --- a/optimize/functions/minsurf.go +++ b/optimize/functions/minsurf.go @@ -14,8 +14,9 @@ import ( // values in a unit square centered at the origin. // // References: -// Averick, M.B., Carter, R.G., Moré, J.J., Xue, G.-L.: The Minpack-2 Test -// Problem Collection. Preprint MCS-P153-0692, Argonne National Laboratory (1992) +// +// Averick, M.B., Carter, R.G., Moré, J.J., Xue, G.-L.: The Minpack-2 Test +// Problem Collection. Preprint MCS-P153-0692, Argonne National Laboratory (1992) type MinimalSurface struct { bottom, top []float64 left, right []float64 @@ -163,10 +164,13 @@ func (ms *MinimalSurface) ExactX() []float64 { // ExactSolution returns the value of the exact solution to the minimal surface // problem at (x,y). The exact solution is -// F_exact(x,y) = U^2(x,y) - V^2(x,y), +// +// F_exact(x,y) = U^2(x,y) - V^2(x,y), +// // where U and V are the unique solutions to the equations -// x = u + uv^2 - u^3/3, -// y = -v - u^2v + v^3/3. +// +// x = u + uv^2 - u^3/3, +// y = -v - u^2v + v^3/3. func (ms *MinimalSurface) ExactSolution(x, y float64) float64 { var u = [2]float64{x, -y} var f [2]float64 diff --git a/optimize/functions/vlse.go b/optimize/functions/vlse.go index 16a1ab41..6f5557a6 100644 --- a/optimize/functions/vlse.go +++ b/optimize/functions/vlse.go @@ -15,11 +15,14 @@ import "math" // Ackley implements the Ackley function, a function of arbitrary dimension that // has many local minima. It has a single global minimum of 0 at 0. Its typical // domain is the hypercube of [-32.768, 32.768]^d. -// f(x) = -20 * exp(-0.2 sqrt(1/d sum_i x_i^2)) - exp(1/d sum_i cos(2π x_i)) + 20 + exp(1) +// +// f(x) = -20 * exp(-0.2 sqrt(1/d sum_i x_i^2)) - exp(1/d sum_i cos(2π x_i)) + 20 + exp(1) +// // where d is the input dimension. // // Reference: -// https://www.sfu.ca/~ssurjano/ackley.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/ackley.html (obtained June 2017) type Ackley struct{} func (Ackley) Func(x []float64) float64 { @@ -35,9 +38,12 @@ func (Ackley) Func(x []float64) float64 { // Bukin6 implements Bukin's 6th function. The function is two-dimensional, with // the typical domain as x_0 ∈ [-15, -5], x_1 ∈ [-3, 3]. The function has a unique // global minimum at [-10, 1], and many local minima. 
-// f(x) = 100 * sqrt(|x_1 - 0.01*x_0^2|) + 0.01*|x_0+10| +// +// f(x) = 100 * sqrt(|x_1 - 0.01*x_0^2|) + 0.01*|x_0+10| +// // Reference: -// https://www.sfu.ca/~ssurjano/bukin6.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/bukin6.html (obtained June 2017) type Bukin6 struct{} func (Bukin6) Func(x []float64) float64 { @@ -50,13 +56,18 @@ func (Bukin6) Func(x []float64) float64 { // CamelThree implements the three-hump camel function, a two-dimensional function // with three local minima, one of which is global. // The function is given by -// f(x) = 2*x_0^2 - 1.05*x_0^4 + x_0^6/6 + x_0*x_1 + x_1^2 +// +// f(x) = 2*x_0^2 - 1.05*x_0^4 + x_0^6/6 + x_0*x_1 + x_1^2 +// // with the global minimum at -// x^* = (0, 0) -// f(x^*) = 0 +// +// x^* = (0, 0) +// f(x^*) = 0 +// // The typical domain is x_i ∈ [-5, 5] for all i. // Reference: -// https://www.sfu.ca/~ssurjano/camel3.html (obtained December 2017) +// +// https://www.sfu.ca/~ssurjano/camel3.html (obtained December 2017) type CamelThree struct{} func (c CamelThree) Func(x []float64) float64 { @@ -73,13 +84,18 @@ func (c CamelThree) Func(x []float64) float64 { // CamelSix implements the six-hump camel function, a two-dimensional function. // with six local minima, two of which are global. // The function is given by -// f(x) = (4 - 2.1*x_0^2 + x_0^4/3)*x_0^2 + x_0*x_1 + (-4 + 4*x_1^2)*x_1^2 +// +// f(x) = (4 - 2.1*x_0^2 + x_0^4/3)*x_0^2 + x_0*x_1 + (-4 + 4*x_1^2)*x_1^2 +// // with the global minima at -// x^* = (0.0898, -0.7126), (-0.0898, 0.7126) -// f(x^*) = -1.0316 +// +// x^* = (0.0898, -0.7126), (-0.0898, 0.7126) +// f(x^*) = -1.0316 +// // The typical domain is x_0 ∈ [-3, 3], x_1 ∈ [-2, 2]. // Reference: -// https://www.sfu.ca/~ssurjano/camel6.html (obtained December 2017) +// +// https://www.sfu.ca/~ssurjano/camel6.html (obtained December 2017) type CamelSix struct{} func (c CamelSix) Func(x []float64) float64 { @@ -97,9 +113,12 @@ func (c CamelSix) Func(x []float64) float64 { // is a two-dimensional function with many local minima, and four global minima // at (±1.3491, ±1.3491). The function is typically evaluated in the square // [-10,10]^2. -// f(x) = -0.001(|sin(x_0)sin(x_1)exp(|100-sqrt((x_0^2+x_1^2)/π)|)|+1)^0.1 +// +// f(x) = -0.001(|sin(x_0)sin(x_1)exp(|100-sqrt((x_0^2+x_1^2)/π)|)|+1)^0.1 +// // Reference: -// https://www.sfu.ca/~ssurjano/crossit.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/crossit.html (obtained June 2017) type CrossInTray struct{} func (CrossInTray) Func(x []float64) float64 { @@ -115,13 +134,18 @@ func (CrossInTray) Func(x []float64) float64 { // DixonPrice implements the DixonPrice function, a function of arbitrary dimension // Its typical domain is the hypercube of [-10, 10]^d. // The function is given by -// f(x) = (x_0-1)^2 + \sum_{i=1}^{d-1} (i+1) * (2*x_i^2-x_{i-1})^2 +// +// f(x) = (x_0-1)^2 + \sum_{i=1}^{d-1} (i+1) * (2*x_i^2-x_{i-1})^2 +// // where d is the input dimension. There is a single global minimum, which has // a location and value of -// x_i^* = 2^{-(2^{i+1}-2)/(2^{i+1})} for i = 0, ..., d-1. -// f(x^*) = 0 +// +// x_i^* = 2^{-(2^{i+1}-2)/(2^{i+1})} for i = 0, ..., d-1. 
+// f(x^*) = 0 +// // Reference: -// https://www.sfu.ca/~ssurjano/dixonpr.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/dixonpr.html (obtained June 2017) type DixonPrice struct{} func (DixonPrice) Func(x []float64) float64 { @@ -139,9 +163,12 @@ func (DixonPrice) Func(x []float64) float64 { // DropWave implements the drop-wave function, a two-dimensional function with // many local minima and one global minimum at 0. The function is typically evaluated // in the square [-5.12, 5.12]^2. -// f(x) = - (1+cos(12*sqrt(x0^2+x1^2))) / (0.5*(x0^2+x1^2)+2) +// +// f(x) = - (1+cos(12*sqrt(x0^2+x1^2))) / (0.5*(x0^2+x1^2)+2) +// // Reference: -// https://www.sfu.ca/~ssurjano/drop.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/drop.html (obtained June 2017) type DropWave struct{} func (DropWave) Func(x []float64) float64 { @@ -158,9 +185,12 @@ func (DropWave) Func(x []float64) float64 { // Eggholder implements the Eggholder function, a two-dimensional function with // many local minima and one global minimum at [512, 404.2319]. The function // is typically evaluated in the square [-512, 512]^2. -// f(x) = -(x_1+47)*sin(sqrt(|x_1+x_0/2+47|))-x_1*sin(sqrt(|x_0-(x_1+47)|)) +// +// f(x) = -(x_1+47)*sin(sqrt(|x_1+x_0/2+47|))-x_1*sin(sqrt(|x_0-(x_1+47)|)) +// // Reference: -// https://www.sfu.ca/~ssurjano/egg.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/egg.html (obtained June 2017) type Eggholder struct{} func (Eggholder) Func(x []float64) float64 { @@ -175,9 +205,12 @@ func (Eggholder) Func(x []float64) float64 { // GramacyLee implements the Gramacy-Lee function, a one-dimensional function // with many local minima. The function is typically evaluated on the domain [0.5, 2.5]. -// f(x) = sin(10πx)/(2x) + (x-1)^4 +// +// f(x) = sin(10πx)/(2x) + (x-1)^4 +// // Reference: -// https://www.sfu.ca/~ssurjano/grlee12.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/grlee12.html (obtained June 2017) type GramacyLee struct{} func (GramacyLee) Func(x []float64) float64 { @@ -191,11 +224,14 @@ func (GramacyLee) Func(x []float64) float64 { // Griewank implements the Griewank function, a function of arbitrary dimension that // has many local minima. It has a single global minimum of 0 at 0. Its typical // domain is the hypercube of [-600, 600]^d. -// f(x) = \sum_i x_i^2/4000 - \prod_i cos(x_i/sqrt(i)) + 1 +// +// f(x) = \sum_i x_i^2/4000 - \prod_i cos(x_i/sqrt(i)) + 1 +// // where d is the input dimension. // // Reference: -// https://www.sfu.ca/~ssurjano/griewank.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/griewank.html (obtained June 2017) type Griewank struct{} func (Griewank) Func(x []float64) float64 { @@ -211,9 +247,12 @@ func (Griewank) Func(x []float64) float64 { // HolderTable implements the Holder table function. The Holder table function // is a two-dimensional function with many local minima, and four global minima // at (±8.05502, ±9.66459). The function is typically evaluated in the square [-10,10]^2. -// f(x) = -|sin(x_0)cos(x1)exp(|1-sqrt(x_0^2+x1^2)/π|)| +// +// f(x) = -|sin(x_0)cos(x1)exp(|1-sqrt(x_0^2+x1^2)/π|)| +// // Reference: -// https://www.sfu.ca/~ssurjano/holder.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/holder.html (obtained June 2017) type HolderTable struct{} func (HolderTable) Func(x []float64) float64 { @@ -228,11 +267,14 @@ func (HolderTable) Func(x []float64) float64 { // Langermann2 implements the two-dimensional version of the Langermann function. 
// The Langermann function has many local minima. The function is typically // evaluated in the square [0,10]^2. -// f(x) = \sum_1^5 c_i exp(-(1/π)\sum_{j=1}^2(x_j-A_{ij})^2) * cos(π\sum_{j=1}^2 (x_j - A_{ij})^2) -// c = [5]float64{1,2,5,2,3} -// A = [5][2]float64{{3,5},{5,2},{2,1},{1,4},{7,9}} +// +// f(x) = \sum_1^5 c_i exp(-(1/π)\sum_{j=1}^2(x_j-A_{ij})^2) * cos(π\sum_{j=1}^2 (x_j - A_{ij})^2) +// c = [5]float64{1,2,5,2,3} +// A = [5][2]float64{{3,5},{5,2},{2,1},{1,4},{7,9}} +// // Reference: -// https://www.sfu.ca/~ssurjano/langer.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/langer.html (obtained June 2017) type Langermann2 struct{} func (Langermann2) Func(x []float64) float64 { @@ -258,13 +300,16 @@ func (Langermann2) Func(x []float64) float64 { // Levy implements the Levy function, a function of arbitrary dimension that // has many local minima. It has a single global minimum of 0 at 1. Its typical // domain is the hypercube of [-10, 10]^d. -// f(x) = sin^2(π*w_0) + \sum_{i=0}^{d-2}(w_i-1)^2*[1+10sin^2(π*w_i+1)] + -// (w_{d-1}-1)^2*[1+sin^2(2π*w_{d-1})] -// w_i = 1 + (x_i-1)/4 +// +// f(x) = sin^2(π*w_0) + \sum_{i=0}^{d-2}(w_i-1)^2*[1+10sin^2(π*w_i+1)] + +// (w_{d-1}-1)^2*[1+sin^2(2π*w_{d-1})] +// w_i = 1 + (x_i-1)/4 +// // where d is the input dimension. // // Reference: -// https://www.sfu.ca/~ssurjano/levy.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/levy.html (obtained June 2017) type Levy struct{} func (Levy) Func(x []float64) float64 { @@ -284,9 +329,12 @@ func (Levy) Func(x []float64) float64 { // Levy13 implements the Levy-13 function, a two-dimensional function // with many local minima. It has a single global minimum of 0 at 1. Its typical // domain is the square [-10, 10]^2. -// f(x) = sin^2(3π*x_0) + (x_0-1)^2*[1+sin^2(3π*x_1)] + (x_1-1)^2*[1+sin^2(2π*x_1)] +// +// f(x) = sin^2(3π*x_0) + (x_0-1)^2*[1+sin^2(3π*x_1)] + (x_1-1)^2*[1+sin^2(2π*x_1)] +// // Reference: -// https://www.sfu.ca/~ssurjano/levy13.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/levy13.html (obtained June 2017) type Levy13 struct{} func (Levy13) Func(x []float64) float64 { @@ -304,11 +352,14 @@ func (Levy13) Func(x []float64) float64 { // Rastrigin implements the Rastrigen function, a function of arbitrary dimension // that has many local minima. It has a single global minimum of 0 at 0. Its typical // domain is the hypercube of [-5.12, 5.12]^d. -// f(x) = 10d + \sum_i [x_i^2 - 10cos(2π*x_i)] +// +// f(x) = 10d + \sum_i [x_i^2 - 10cos(2π*x_i)] +// // where d is the input dimension. // // Reference: -// https://www.sfu.ca/~ssurjano/rastr.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/rastr.html (obtained June 2017) type Rastrigin struct{} func (Rastrigin) Func(x []float64) float64 { @@ -322,9 +373,12 @@ func (Rastrigin) Func(x []float64) float64 { // Schaffer2 implements the second Schaffer function, a two-dimensional function // with many local minima. It has a single global minimum of 0 at 0. Its typical // domain is the square [-100, 100]^2. 
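
The VLSE-style test functions above document their global minima (Ackley: 0 at the origin, Levy: 0 at the all-ones point, Rastrigin: 0 at the origin). A quick spot check of those documented values, using only the Func methods shown in these hunks:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/optimize/functions"
	)

	func main() {
		// Each value should be zero up to floating-point rounding,
		// matching the minima stated in the comments above.
		fmt.Println(functions.Ackley{}.Func([]float64{0, 0, 0}))
		fmt.Println(functions.Levy{}.Func([]float64{1, 1, 1}))
		fmt.Println(functions.Rastrigin{}.Func([]float64{0, 0, 0}))
	}
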
-// f(x) = 0.5 + (sin^2(x_0^2-x_1^2)-0.5) / (1+0.001*(x_0^2+x_1^2))^2 +// +// f(x) = 0.5 + (sin^2(x_0^2-x_1^2)-0.5) / (1+0.001*(x_0^2+x_1^2))^2 +// // Reference: -// https://www.sfu.ca/~ssurjano/schaffer2.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/schaffer2.html (obtained June 2017) type Schaffer2 struct{} func (Schaffer2) Func(x []float64) float64 { @@ -340,9 +394,12 @@ func (Schaffer2) Func(x []float64) float64 { // Schaffer4 implements the fourth Schaffer function, a two-dimensional function // with many local minima. Its typical domain is the square [-100, 100]^2. -// f(x) = 0.5 + (cos(sin(|x_0^2-x_1^2|))-0.5) / (1+0.001*(x_0^2+x_1^2))^2 +// +// f(x) = 0.5 + (cos(sin(|x_0^2-x_1^2|))-0.5) / (1+0.001*(x_0^2+x_1^2))^2 +// // Reference: -// https://www.sfu.ca/~ssurjano/schaffer4.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/schaffer4.html (obtained June 2017) type Schaffer4 struct{} func (Schaffer4) Func(x []float64) float64 { @@ -357,11 +414,14 @@ func (Schaffer4) Func(x []float64) float64 { // Schwefel implements the Schwefel function, a function of arbitrary dimension // that has many local minima. Its typical domain is the hypercube of [-500, 500]^d. -// f(x) = 418.9829*d - \sum_i x_i*sin(sqrt(|x_i|)) +// +// f(x) = 418.9829*d - \sum_i x_i*sin(sqrt(|x_i|)) +// // where d is the input dimension. // // Reference: -// https://www.sfu.ca/~ssurjano/schwef.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/schwef.html (obtained June 2017) type Schwefel struct{} func (Schwefel) Func(x []float64) float64 { @@ -375,9 +435,12 @@ func (Schwefel) Func(x []float64) float64 { // Shubert implements the Shubert function, a two-dimensional function // with many local minima and many global minima. Its typical domain is the // square [-10, 10]^2. -// f(x) = (sum_{i=1}^5 i cos((i+1)*x_0+i)) * (\sum_{i=1}^5 i cos((i+1)*x_1+i)) +// +// f(x) = (sum_{i=1}^5 i cos((i+1)*x_0+i)) * (\sum_{i=1}^5 i cos((i+1)*x_1+i)) +// // Reference: -// https://www.sfu.ca/~ssurjano/shubert.html (obtained June 2017) +// +// https://www.sfu.ca/~ssurjano/shubert.html (obtained June 2017) type Shubert struct{} func (Shubert) Func(x []float64) float64 { diff --git a/optimize/interfaces.go b/optimize/interfaces.go index e28fa00a..09d395a2 100644 --- a/optimize/interfaces.go +++ b/optimize/interfaces.go @@ -69,7 +69,9 @@ type Statuser interface { // (approximate) minimum of the objective function along the search direction // dir_k starting at the most recent location x_k, i.e., it tries to minimize // the function -// φ(step) := f(x_k + step * dir_k) where step > 0. +// +// φ(step) := f(x_k + step * dir_k) where step > 0. +// // Typically, a Linesearcher will be used in conjunction with LinesearchMethod // for performing gradient-based optimization through sequential line searches. type Linesearcher interface { diff --git a/optimize/linesearch.go b/optimize/linesearch.go index 740fcf68..0fb1dd6c 100644 --- a/optimize/linesearch.go +++ b/optimize/linesearch.go @@ -181,9 +181,9 @@ func (ls *LinesearchMethod) initNextLinesearch(loc *Location) (Operation, error) // ArmijoConditionMet returns true if the Armijo condition (aka sufficient // decrease) has been met. 
Under normal conditions, the following should be // true, though this is not enforced: -// - initGrad < 0 -// - step > 0 -// - 0 < decrease < 1 +// - initGrad < 0 +// - step > 0 +// - 0 < decrease < 1 func ArmijoConditionMet(currObj, initObj, initGrad, step, decrease float64) bool { return currObj <= initObj+decrease*step*initGrad } @@ -193,9 +193,9 @@ func ArmijoConditionMet(currObj, initObj, initGrad, step, decrease float64) bool // value, and sufficient decrease in the magnitude of the projected gradient. // Under normal conditions, the following should be true, though this is not // enforced: -// - initGrad < 0 -// - step > 0 -// - 0 <= decrease < curvature < 1 +// - initGrad < 0 +// - step > 0 +// - 0 <= decrease < curvature < 1 func StrongWolfeConditionsMet(currObj, currGrad, initObj, initGrad, step, decrease, curvature float64) bool { if currObj > initObj+decrease*step*initGrad { return false @@ -207,9 +207,9 @@ func StrongWolfeConditionsMet(currObj, currGrad, initObj, initGrad, step, decrea // The weak Wolfe conditions ensure sufficient decrease in the function value, // and sufficient decrease in the value of the projected gradient. Under normal // conditions, the following should be true, though this is not enforced: -// - initGrad < 0 -// - step > 0 -// - 0 <= decrease < curvature< 1 +// - initGrad < 0 +// - step > 0 +// - 0 <= decrease < curvature< 1 func WeakWolfeConditionsMet(currObj, currGrad, initObj, initGrad, step, decrease, curvature float64) bool { if currObj > initObj+decrease*step*initGrad { return false diff --git a/optimize/morethuente.go b/optimize/morethuente.go index ed6fcecd..cb23890c 100644 --- a/optimize/morethuente.go +++ b/optimize/morethuente.go @@ -12,8 +12,8 @@ var _ Linesearcher = (*MoreThuente)(nil) // sufficient decrease and curvature conditions (the strong Wolfe conditions). // // References: -// - More, J.J. and D.J. Thuente: Line Search Algorithms with Guaranteed Sufficient -// Decrease. ACM Transactions on Mathematical Software 20(3) (1994), 286-307 +// - More, J.J. and D.J. Thuente: Line Search Algorithms with Guaranteed Sufficient +// Decrease. ACM Transactions on Mathematical Software 20(3) (1994), 286-307 type MoreThuente struct { // DecreaseFactor is the constant factor in the sufficient decrease // (Armijo) condition. diff --git a/optimize/neldermead.go b/optimize/neldermead.go index d92db09e..5118fd4c 100644 --- a/optimize/neldermead.go +++ b/optimize/neldermead.go @@ -49,7 +49,7 @@ var _ Method = (*NelderMead)(nil) // simplex algorithm for linear programming). The implementation follows the // algorithm described in // -// http://epubs.siam.org/doi/pdf/10.1137/S1052623496303470 +// http://epubs.siam.org/doi/pdf/10.1137/S1052623496303470 // // If an initial simplex is provided, it is used and initLoc is ignored. If // InitialVertices and InitialValues are both nil, an initial simplex will be @@ -60,7 +60,7 @@ var _ Method = (*NelderMead)(nil) // are zero, they will be set automatically based on the dimension according to // the recommendations in // -// http://www.webpages.uidaho.edu/~fuchang/res/ANMS.pdf +// http://www.webpages.uidaho.edu/~fuchang/res/ANMS.pdf type NelderMead struct { InitialVertices [][]float64 InitialValues []float64 diff --git a/optimize/newton.go b/optimize/newton.go index 794b2685..bd29a08b 100644 --- a/optimize/newton.go +++ b/optimize/newton.go @@ -25,8 +25,10 @@ var ( // Newton iteratively forms a quadratic model to the objective function f and // tries to minimize this approximate model. 
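
ArmijoConditionMet and StrongWolfeConditionsMet above are plain predicates on scalar line-search quantities, so they can be exercised directly. A tiny worked example with made-up values for φ(0), φ'(0) and a trial step (the numbers are illustrative, not from the source):

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/optimize"
	)

	func main() {
		// Hypothetical line search along a descent direction:
		// φ(0) = 1, φ'(0) = -1, trial step 0.5 with φ(0.5) = 0.6 and φ'(0.5) = -0.2.
		initObj, initGrad := 1.0, -1.0
		step, currObj, currGrad := 0.5, 0.6, -0.2

		const decrease, curvature = 1e-4, 0.9

		// 0.6 <= 1 + 1e-4*0.5*(-1), so the Armijo condition holds.
		fmt.Println(optimize.ArmijoConditionMet(currObj, initObj, initGrad, step, decrease))
		// |φ'(0.5)| = 0.2 <= 0.9*|φ'(0)|, so the strong Wolfe conditions hold as well.
		fmt.Println(optimize.StrongWolfeConditionsMet(currObj, currGrad, initObj, initGrad, step, decrease, curvature))
	}
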
It generates a sequence of // locations x_k by means of -// solve H_k d_k = -∇f_k for d_k, -// x_{k+1} = x_k + α_k d_k, +// +// solve H_k d_k = -∇f_k for d_k, +// x_{k+1} = x_k + α_k d_k, +// // where H_k is the Hessian matrix of f at x_k and α_k is a step size found by // a line search. // diff --git a/optimize/stepsizers.go b/optimize/stepsizers.go index 74827f87..6508b573 100644 --- a/optimize/stepsizers.go +++ b/optimize/stepsizers.go @@ -134,7 +134,9 @@ func (q *QuadraticStepSize) StepSize(loc *Location, dir []float64) (stepSize flo // assumption that the first-order change in the function will be the same as // that obtained at the previous iteration. That is, the initial step size s^0_k // is chosen so that -// s^0_k ∇f_k⋅p_k = s_{k-1} ∇f_{k-1}⋅p_{k-1} +// +// s^0_k ∇f_k⋅p_k = s_{k-1} ∇f_{k-1}⋅p_{k-1} +// // This is useful for line search methods that do not produce well-scaled // descent directions, such as gradient descent or conjugate gradient methods. type FirstOrderStepSize struct { diff --git a/spatial/kdtree/kdtree_user_type_example_test.go b/spatial/kdtree/kdtree_user_type_example_test.go index f367ee32..2b0778ea 100644 --- a/spatial/kdtree/kdtree_user_type_example_test.go +++ b/spatial/kdtree/kdtree_user_type_example_test.go @@ -91,8 +91,9 @@ type place struct { // Compare satisfies the axis comparisons method of the kdtree.Comparable interface. // The dimensions are: -// 0 = lat -// 1 = lon +// +// 0 = lat +// 1 = lon func (p place) Compare(c kdtree.Comparable, d kdtree.Dim) float64 { q := c.(place) switch d { diff --git a/spatial/r2/triangle.go b/spatial/r2/triangle.go index a64d5695..6b63b39f 100644 --- a/spatial/r2/triangle.go +++ b/spatial/r2/triangle.go @@ -79,8 +79,9 @@ type line [2]Vec // vecOnLine takes a value between 0 and 1 to linearly // interpolate a point on the line. -// vecOnLine(0) returns l[0] -// vecOnLine(1) returns l[1] +// +// vecOnLine(0) returns l[0] +// vecOnLine(1) returns l[1] func (l line) vecOnLine(t float64) Vec { lineDir := Sub(l[1], l[0]) return Add(l[0], Scale(t, lineDir)) diff --git a/spatial/r2/vector.go b/spatial/r2/vector.go index 2d06933d..ca982f36 100644 --- a/spatial/r2/vector.go +++ b/spatial/r2/vector.go @@ -51,13 +51,15 @@ func Rotate(p Vec, alpha float64, q Vec) Vec { } // Norm returns the Euclidean norm of p -// |p| = sqrt(p_x^2 + p_y^2). +// +// |p| = sqrt(p_x^2 + p_y^2). func Norm(p Vec) float64 { return math.Hypot(p.X, p.Y) } // Norm2 returns the Euclidean squared norm of p -// |p|^2 = p_x^2 + p_y^2. +// +// |p|^2 = p_x^2 + p_y^2. func Norm2(p Vec) float64 { return p.X*p.X + p.Y*p.Y } @@ -135,7 +137,8 @@ func absElem(a Vec) Vec { } // mulElem returns the Hadamard product between vectors a and b. -// v = {a.X*b.X, a.Y*b.Y, a.Z*b.Z} +// +// v = {a.X*b.X, a.Y*b.Y, a.Z*b.Z} func mulElem(a, b Vec) Vec { return Vec{ X: a.X * b.X, @@ -145,7 +148,8 @@ func mulElem(a, b Vec) Vec { // divElem returns the Hadamard product between vector a // and the inverse components of vector b. -// v = {a.X/b.X, a.Y/b.Y, a.Z/b.Z} +// +// v = {a.X/b.X, a.Y/b.Y, a.Z/b.Z} func divElem(a, b Vec) Vec { return Vec{ X: a.X / b.X, diff --git a/spatial/r3/box.go b/spatial/r3/box.go index e9130d65..b7552af6 100644 --- a/spatial/r3/box.go +++ b/spatial/r3/box.go @@ -45,11 +45,12 @@ func (a Box) Center() Vec { // for X and Y values and maximum Z value. 
// // Edges for the box can be constructed with the following indices: -// edges := [12][2]int{ -// {0, 1}, {1, 2}, {2, 3}, {3, 0}, -// {4, 5}, {5, 6}, {6, 7}, {7, 4}, -// {0, 4}, {1, 5}, {2, 6}, {3, 7}, -// } +// +// edges := [12][2]int{ +// {0, 1}, {1, 2}, {2, 3}, {3, 0}, +// {4, 5}, {5, 6}, {6, 7}, {7, 4}, +// {0, 4}, {1, 5}, {2, 6}, {3, 7}, +// } func (a Box) Vertices() []Vec { return []Vec{ 0: a.Min, diff --git a/spatial/r3/mat.go b/spatial/r3/mat.go index eaa3f47f..930ce42e 100644 --- a/spatial/r3/mat.go +++ b/spatial/r3/mat.go @@ -164,7 +164,8 @@ func (m *Mat) VecCol(j int) Vec { // Outer calculates the outer product of the vectors x and y, // where x and y are treated as column vectors, and stores the result in the receiver. -// m = alpha * x * yᵀ +// +// m = alpha * x * yᵀ func (m *Mat) Outer(alpha float64, x, y Vec) { ax := alpha * x.X ay := alpha * x.Y @@ -183,10 +184,11 @@ func (m *Mat) Outer(alpha float64, x, y Vec) { } // Det calculates the determinant of the receiver using the following formula -// ⎡a b c⎤ -// m = ⎢d e f⎥ -// ⎣g h i⎦ -// det(m) = a(ei − fh) − b(di − fg) + c(dh − eg) +// +// ⎡a b c⎤ +// m = ⎢d e f⎥ +// ⎣g h i⎦ +// det(m) = a(ei − fh) − b(di − fg) + c(dh − eg) func (m *Mat) Det() float64 { a := m.At(0, 0) b := m.At(0, 1) @@ -200,9 +202,10 @@ func (m *Mat) Det() float64 { // Skew sets the receiver to the 3×3 skew symmetric matrix // (right hand system) of v. -// ⎡ 0 -z y⎤ -// Skew({x,y,z}) = ⎢ z 0 -x⎥ -// ⎣-y x 0⎦ +// +// ⎡ 0 -z y⎤ +// Skew({x,y,z}) = ⎢ z 0 -x⎥ +// ⎣-y x 0⎦ func (m *Mat) Skew(v Vec) { m.Set(0, 0, 0) m.Set(0, 1, -v.Z) diff --git a/spatial/r3/mat_safe.go b/spatial/r3/mat_safe.go index 1d23d2fe..d62f6a48 100644 --- a/spatial/r3/mat_safe.go +++ b/spatial/r3/mat_safe.go @@ -57,9 +57,10 @@ func Eye() *Mat { } // Skew returns the 3×3 skew symmetric matrix (right hand system) of v. -// ⎡ 0 -z y⎤ -// Skew({x,y,z}) = ⎢ z 0 -x⎥ -// ⎣-y x 0⎦ +// +// ⎡ 0 -z y⎤ +// Skew({x,y,z}) = ⎢ z 0 -x⎥ +// ⎣-y x 0⎦ // // DEPRECATED: use Mat.Skew() func Skew(v Vec) (M *Mat) { diff --git a/spatial/r3/mat_unsafe.go b/spatial/r3/mat_unsafe.go index 59b251f1..dd50fe66 100644 --- a/spatial/r3/mat_unsafe.go +++ b/spatial/r3/mat_unsafe.go @@ -44,9 +44,10 @@ func Eye() *Mat { } // Skew returns the 3×3 skew symmetric matrix (right hand system) of v. -// ⎡ 0 -z y⎤ -// Skew({x,y,z}) = ⎢ z 0 -x⎥ -// ⎣-y x 0⎦ +// +// ⎡ 0 -z y⎤ +// Skew({x,y,z}) = ⎢ z 0 -x⎥ +// ⎣-y x 0⎦ // // DEPRECATED: use Mat.Skew() func Skew(v Vec) (M *Mat) { diff --git a/spatial/r3/triangle.go b/spatial/r3/triangle.go index 00ba2614..ee56e0f7 100644 --- a/spatial/r3/triangle.go +++ b/spatial/r3/triangle.go @@ -41,9 +41,9 @@ func (t Triangle) IsDegenerate(tol float64) bool { // longIdx returns index of the longest side. The sides // of the triangles are are as follows: -// - Side 0 formed by vertices 0 and 1 -// - Side 1 formed by vertices 1 and 2 -// - Side 2 formed by vertices 0 and 2 +// - Side 0 formed by vertices 0 and 1 +// - Side 1 formed by vertices 1 and 2 +// - Side 2 formed by vertices 0 and 2 func (t Triangle) longIdx() int { sides := [3]Vec{Sub(t[1], t[0]), Sub(t[2], t[1]), Sub(t[0], t[2])} len2 := [3]float64{Norm2(sides[0]), Norm2(sides[1]), Norm2(sides[2])} @@ -101,8 +101,9 @@ type line [2]Vec // vecOnLine takes a value between 0 and 1 to linearly // interpolate a point on the line. 
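
The Skew and Det comments above give the 3×3 skew-symmetric layout and the cofactor expansion used for the determinant. A small sketch built on the package-level Skew constructor and the At and Det methods that appear in these hunks (Skew is marked deprecated above in favor of Mat.Skew, but it is the shortest way to obtain a populated *Mat here); the determinant of any 3×3 skew-symmetric matrix is zero, which gives a convenient check:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/spatial/r3"
	)

	func main() {
		m := r3.Skew(r3.Vec{X: 1, Y: 2, Z: 3})

		// The rows should follow the documented layout:
		//  0 -z  y
		//  z  0 -x
		// -y  x  0
		for i := 0; i < 3; i++ {
			fmt.Println(m.At(i, 0), m.At(i, 1), m.At(i, 2))
		}

		fmt.Println("det:", m.Det()) // 0 for a skew-symmetric matrix
	}
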
-// vecOnLine(0) returns l[0] -// vecOnLine(1) returns l[1] +// +// vecOnLine(0) returns l[0] +// vecOnLine(1) returns l[1] func (l line) vecOnLine(t float64) Vec { lineDir := Sub(l[1], l[0]) return Add(l[0], Scale(t, lineDir)) diff --git a/spatial/r3/vector.go b/spatial/r3/vector.go index 9d4ab0dd..29eb8342 100644 --- a/spatial/r3/vector.go +++ b/spatial/r3/vector.go @@ -58,13 +58,15 @@ func Rotate(p Vec, alpha float64, axis Vec) Vec { } // Norm returns the Euclidean norm of p -// |p| = sqrt(p_x^2 + p_y^2 + p_z^2). +// +// |p| = sqrt(p_x^2 + p_y^2 + p_z^2). func Norm(p Vec) float64 { return math.Hypot(p.X, math.Hypot(p.Y, p.Z)) } // Norm2 returns the Euclidean squared norm of p -// |p|^2 = p_x^2 + p_y^2 + p_z^2. +// +// |p|^2 = p_x^2 + p_y^2 + p_z^2. func Norm2(p Vec) float64 { return p.X*p.X + p.Y*p.Y + p.Z*p.Z } @@ -123,7 +125,8 @@ func absElem(a Vec) Vec { } // mulElem returns the Hadamard product between vectors a and b. -// v = {a.X*b.X, a.Y*b.Y, a.Z*b.Z} +// +// v = {a.X*b.X, a.Y*b.Y, a.Z*b.Z} func mulElem(a, b Vec) Vec { return Vec{ X: a.X * b.X, @@ -134,7 +137,8 @@ func mulElem(a, b Vec) Vec { // divElem returns the Hadamard product between vector a // and the inverse components of vector b. -// v = {a.X/b.X, a.Y/b.Y, a.Z/b.Z} +// +// v = {a.X/b.X, a.Y/b.Y, a.Z/b.Z} func divElem(a, b Vec) Vec { return Vec{ X: a.X / b.X, diff --git a/stat/boston_data_test.go b/stat/boston_data_test.go index 2036a7d4..2cf7586e 100644 --- a/stat/boston_data_test.go +++ b/stat/boston_data_test.go @@ -10,17 +10,18 @@ import "gonum.org/v1/gonum/mat" // http://dx.doi.org/10.1016/0095-0696(78)90006-2 // http://lib.stat.cmu.edu/datasets/boston // Columns are; -// per capita crime rate by town, -// proportion of non-retail business acres per town, -// nitric oxide concentration (parts per 10 million), -// weighted distances to Boston employment centers, -// index of accessibility to radial highways, -// pupil-teacher ratio by town, -// proportion of blacks by town, -// average number of rooms per dwelling, -// proportion of owner-occupied units built prior to 1940, -// full-value property-tax rate per $10000, -// median value of owner-occupied homes in $1000s. +// +// per capita crime rate by town, +// proportion of non-retail business acres per town, +// nitric oxide concentration (parts per 10 million), +// weighted distances to Boston employment centers, +// index of accessibility to radial highways, +// pupil-teacher ratio by town, +// proportion of blacks by town, +// average number of rooms per dwelling, +// proportion of owner-occupied units built prior to 1940, +// full-value property-tax rate per $10000, +// median value of owner-occupied homes in $1000s. var bostonData = mat.NewDense(506, 11, []float64{ 0.00632, 2.31000, 0.53800, 4.09000, 1.00000, 15.30000, 396.90000, 6.57500, 65.20000, 296.00000, 24.00000, 0.02731, 7.07000, 0.46900, 4.96710, 2.00000, 17.80000, 396.90000, 6.42100, 78.90000, 242.00000, 21.60000, diff --git a/stat/combin/combin.go b/stat/combin/combin.go index f52a71ec..1eeb5936 100644 --- a/stat/combin/combin.go +++ b/stat/combin/combin.go @@ -22,7 +22,7 @@ const ( // The binomial coefficient, C(n,k), is the number of unordered combinations of // k elements in a set that is n elements big, and is defined as // -// C(n,k) = n!/((n-k)!k!) +// C(n,k) = n!/((n-k)!k!) // // n and k must be non-negative with n >= k, otherwise Binomial will panic. // No check is made for overflow. 
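
The binomial coefficient comment above defines C(n,k) = n!/((n-k)!k!). A one-line check using combin.Binomial, whose int-valued signature appears in the next hunk header:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/stat/combin"
	)

	func main() {
		fmt.Println(combin.Binomial(5, 2)) // C(5,2) = 5!/(3!·2!) = 10
	}
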
@@ -46,7 +46,9 @@ func Binomial(n, k int) int { // GeneralizedBinomial returns the generalized binomial coefficient of (n, k), // defined as -// Γ(n+1) / (Γ(k+1) Γ(n-k+1)) +// +// Γ(n+1) / (Γ(k+1) Γ(n-k+1)) +// // where Γ is the Gamma function. GeneralizedBinomial is useful for continuous // relaxations of the binomial coefficient, or when the binomial coefficient value // may overflow int. In the latter case, one may use math/big for an exact @@ -274,14 +276,18 @@ func IndexToCombination(dst []int, idx, n, k int) []int { // Cartesian returns the Cartesian product of the slices in data. The Cartesian // product of two sets is the set of all combinations of the items. For example, // given the input -// []int{2, 3, 1} +// +// []int{2, 3, 1} +// // the returned matrix will be -// [ 0 0 0 ] -// [ 0 1 0 ] -// [ 0 2 0 ] -// [ 1 0 0 ] -// [ 1 1 0 ] -// [ 1 2 0 ] +// +// [ 0 0 0 ] +// [ 0 1 0 ] +// [ 0 2 0 ] +// [ 1 0 0 ] +// [ 1 1 0 ] +// [ 1 2 0 ] +// // Cartesian panics if any of the provided lengths are less than 1. func Cartesian(lens []int) [][]int { rows := Card(lens) diff --git a/stat/distmat/wishart.go b/stat/distmat/wishart.go index d89d3ef9..0f007dc1 100644 --- a/stat/distmat/wishart.go +++ b/stat/distmat/wishart.go @@ -20,7 +20,9 @@ import ( // definite matrix V. // // The Wishart PDF is given by -// p(X) = [|X|^((ν-d-1)/2) * exp(-tr(V^-1 * X)/2)] / [2^(ν*d/2) * |V|^(ν/2) * Γ_d(ν/2)] +// +// p(X) = [|X|^((ν-d-1)/2) * exp(-tr(V^-1 * X)/2)] / [2^(ν*d/2) * |V|^(ν/2) * Γ_d(ν/2)] +// // where X is a d×d PSD matrix, ν > d-1, |·| denotes the determinant, tr is the // trace and Γ_d is the multivariate gamma function. // diff --git a/stat/distmv/dirichlet.go b/stat/distmv/dirichlet.go index e4b01537..30511e41 100644 --- a/stat/distmv/dirichlet.go +++ b/stat/distmv/dirichlet.go @@ -20,7 +20,9 @@ import ( // generates elements over the probability simplex, i.e. ||x||_1 = 1. The Dirichlet // distribution is the conjugate prior to the categorical distribution and the // multivariate version of the beta distribution. The probability of a point x is -// 1/Beta(α) \prod_i x_i^(α_i - 1) +// +// 1/Beta(α) \prod_i x_i^(α_i - 1) +// // where Beta(α) is the multivariate Beta function (see the mathext package). // // For more information see https://en.wikipedia.org/wiki/Dirichlet_distribution @@ -59,7 +61,9 @@ func NewDirichlet(alpha []float64, src rand.Source) *Dirichlet { // CovarianceMatrix calculates the covariance matrix of the distribution, // storing the result in dst. Upon return, the value at element {i, j} of the // covariance matrix is equal to the covariance of the i^th and j^th variables. -// covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])] +// +// covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])] +// // If the dst matrix is empty it will be resized to the correct dimensions, // otherwise dst must match the dimension of the receiver or CovarianceMatrix // will panic. diff --git a/stat/distmv/normal.go b/stat/distmv/normal.go index 98bfb895..cc61c8d3 100644 --- a/stat/distmv/normal.go +++ b/stat/distmv/normal.go @@ -19,7 +19,9 @@ const badInputLength = "distmv: input slice length mismatch" // Normal is a multivariate normal distribution (also known as the multivariate // Gaussian distribution). Its pdf in k dimensions is given by -// (2 π)^(-k/2) |Σ|^(-1/2) exp(-1/2 (x-μ)'Σ^-1(x-μ)) +// +// (2 π)^(-k/2) |Σ|^(-1/2) exp(-1/2 (x-μ)'Σ^-1(x-μ)) +// // where μ is the mean vector and Σ the covariance matrix. Σ must be symmetric // and positive definite. Use NewNormal to construct. 
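
The Cartesian comment a few hunks up already lists the six index rows produced for the input []int{2, 3, 1}; iterating over the returned matrix reproduces that listing:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/stat/combin"
	)

	func main() {
		// Lengths {2, 3, 1} give 2*3*1 = 6 index combinations,
		// matching the rows shown in the doc comment above.
		for _, row := range combin.Cartesian([]int{2, 3, 1}) {
			fmt.Println(row)
		}
	}
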
type Normal struct { @@ -119,8 +121,10 @@ func NewNormalPrecision(mu []float64, prec *mat.SymDense, src rand.Source) (norm // on the input evidence. The returned multivariate normal has dimension // n - len(observed), where n is the dimension of the original receiver. The updated // mean and covariance are -// mu = mu_un + sigma_{ob,un}ᵀ * sigma_{ob,ob}^-1 (v - mu_ob) -// sigma = sigma_{un,un} - sigma_{ob,un}ᵀ * sigma_{ob,ob}^-1 * sigma_{ob,un} +// +// mu = mu_un + sigma_{ob,un}ᵀ * sigma_{ob,ob}^-1 (v - mu_ob) +// sigma = sigma_{un,un} - sigma_{ob,un}ᵀ * sigma_{ob,ob}^-1 * sigma_{ob,un} +// // where mu_un and mu_ob are the original means of the unobserved and observed // variables respectively, sigma_{un,un} is the unobserved subset of the covariance // matrix, sigma_{ob,ob} is the observed subset of the covariance matrix, and @@ -154,7 +158,9 @@ func (n *Normal) ConditionNormal(observed []int, values []float64, src rand.Sour // CovarianceMatrix stores the covariance matrix of the distribution in dst. // Upon return, the value at element {i, j} of the covariance matrix is equal // to the covariance of the i^th and j^th variables. -// covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])] +// +// covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])] +// // If the dst matrix is empty it will be resized to the correct dimensions, // otherwise dst must match the dimension of the receiver or CovarianceMatrix // will panic. @@ -215,7 +221,9 @@ func normalLogProb(x, mu []float64, chol *mat.Cholesky, logSqrtDet float64) floa // MarginalNormal returns the marginal distribution of the given input variables. // That is, MarginalNormal returns -// p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o +// +// p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o +// // where x_i are the dimensions in the input, and x_o are the remaining dimensions. // See https://en.wikipedia.org/wiki/Marginal_distribution for more information. // @@ -232,7 +240,9 @@ func (n *Normal) MarginalNormal(vars []int, src rand.Source) (*Normal, bool) { // MarginalNormalSingle returns the marginal of the given input variable. // That is, MarginalNormal returns -// p(x_i) = \int_{x_¬i} p(x_i | x_¬i) p(x_¬i) dx_¬i +// +// p(x_i) = \int_{x_¬i} p(x_i | x_¬i) p(x_¬i) dx_¬i +// // where i is the input index. // See https://en.wikipedia.org/wiki/Marginal_distribution for more information. // @@ -320,7 +330,9 @@ func NormalRand(x, mean []float64, chol *mat.Cholesky, src rand.Source) []float6 // ScoreInput returns the gradient of the log-probability with respect to the // input x. That is, ScoreInput computes -// ∇_x log(p(x)) +// +// ∇_x log(p(x)) +// // If score is nil, a new slice will be allocated and returned. If score is of // length the dimension of Normal, then the result will be put in-place into score. // If neither of these is true, ScoreInput will panic. diff --git a/stat/distmv/statdist.go b/stat/distmv/statdist.go index 2a23c860..c835924c 100644 --- a/stat/distmv/statdist.go +++ b/stat/distmv/statdist.go @@ -18,21 +18,27 @@ import ( // probability distributions. // // The Bhattacharyya distance is defined as -// D_B = -ln(BC(l,r)) -// BC = \int_-∞^∞ (p(x)q(x))^(1/2) dx +// +// D_B = -ln(BC(l,r)) +// BC = \int_-∞^∞ (p(x)q(x))^(1/2) dx +// // Where BC is known as the Bhattacharyya coefficient. 
// The Bhattacharyya distance is related to the Hellinger distance by -// H(l,r) = sqrt(1-BC(l,r)) +// +// H(l,r) = sqrt(1-BC(l,r)) +// // For more information, see -// https://en.wikipedia.org/wiki/Bhattacharyya_distance +// +// https://en.wikipedia.org/wiki/Bhattacharyya_distance type Bhattacharyya struct{} // DistNormal computes the Bhattacharyya distance between normal distributions l and r. // The dimensions of the input distributions must match or DistNormal will panic. // // For Normal distributions, the Bhattacharyya distance is -// Σ = (Σ_l + Σ_r)/2 -// D_B = (1/8)*(μ_l - μ_r)ᵀ*Σ^-1*(μ_l - μ_r) + (1/2)*ln(det(Σ)/(det(Σ_l)*det(Σ_r))^(1/2)) +// +// Σ = (Σ_l + Σ_r)/2 +// D_B = (1/8)*(μ_l - μ_r)ᵀ*Σ^-1*(μ_l - μ_r) + (1/2)*ln(det(Σ)/(det(Σ_l)*det(Σ_r))^(1/2)) func (Bhattacharyya) DistNormal(l, r *Normal) float64 { dim := l.Dim() if dim != r.Dim() { @@ -94,10 +100,12 @@ func unifLogVolOverlap(b1, b2 []r1.Interval) float64 { // distributions. // // The cross-entropy is defined as -// - \int_x l(x) log(r(x)) dx = KL(l || r) + H(l) +// - \int_x l(x) log(r(x)) dx = KL(l || r) + H(l) +// // where KL is the Kullback-Leibler divergence and H is the entropy. // For more information, see -// https://en.wikipedia.org/wiki/Cross_entropy +// +// https://en.wikipedia.org/wiki/Cross_entropy type CrossEntropy struct{} // DistNormal returns the cross-entropy between normal distributions l and r. @@ -114,13 +122,18 @@ func (CrossEntropy) DistNormal(l, r *Normal) float64 { // distributions. // // The Hellinger distance is defined as -// H^2(l,r) = 1/2 * int_x (\sqrt(l(x)) - \sqrt(r(x)))^2 dx +// +// H^2(l,r) = 1/2 * int_x (\sqrt(l(x)) - \sqrt(r(x)))^2 dx +// // and is bounded between 0 and 1. Note the above formula defines the squared // Hellinger distance, while this returns the Hellinger distance itself. // The Hellinger distance is related to the Bhattacharyya distance by -// H^2 = 1 - exp(-D_B) +// +// H^2 = 1 - exp(-D_B) +// // For more information, see -// https://en.wikipedia.org/wiki/Hellinger_distance +// +// https://en.wikipedia.org/wiki/Hellinger_distance type Hellinger struct{} // DistNormal returns the Hellinger distance between normal distributions l and r. @@ -140,7 +153,9 @@ func (Hellinger) DistNormal(l, r *Normal) float64 { // KullbackLeibler is a type for computing the Kullback-Leibler divergence from l to r. // // The Kullback-Leibler divergence is defined as -// D_KL(l || r ) = \int_x p(x) log(p(x)/q(x)) dx +// +// D_KL(l || r ) = \int_x p(x) log(p(x)/q(x)) dx +// // Note that the Kullback-Leibler divergence is not symmetric with respect to // the order of the input arguments. type KullbackLeibler struct{} @@ -150,8 +165,10 @@ type KullbackLeibler struct{} // or DistDirichlet will panic. // // For two Dirichlet distributions, the KL divergence is computed as -// D_KL(l || r) = log Γ(α_0_l) - \sum_i log Γ(α_i_l) - log Γ(α_0_r) + \sum_i log Γ(α_i_r) -// + \sum_i (α_i_l - α_i_r)(ψ(α_i_l)- ψ(α_0_l)) +// +// D_KL(l || r) = log Γ(α_0_l) - \sum_i log Γ(α_i_l) - log Γ(α_0_r) + \sum_i log Γ(α_i_r) +// + \sum_i (α_i_l - α_i_r)(ψ(α_i_l)- ψ(α_0_l)) +// // Where Γ is the gamma function, ψ is the digamma function, and α_0 is the // sum of the Dirichlet parameters. func (KullbackLeibler) DistDirichlet(l, r *Dirichlet) float64 { @@ -179,7 +196,8 @@ func (KullbackLeibler) DistDirichlet(l, r *Dirichlet) float64 { // The dimensions of the input distributions must match or DistNormal will panic. 
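
The statdist comments above state that the Hellinger and Bhattacharyya distances are related by H^2 = 1 - exp(-D_B). A sketch that checks the relation on two concrete normals; distmv.NewNormal and its (mu, sigma, src) signature are assumed from the same package, as they are not part of these hunks:

	package main

	import (
		"fmt"
		"math"

		"gonum.org/v1/gonum/mat"
		"gonum.org/v1/gonum/stat/distmv"
	)

	func main() {
		// Two illustrative 2-dimensional normal distributions.
		l, okL := distmv.NewNormal([]float64{0, 0}, mat.NewSymDense(2, []float64{1, 0, 0, 1}), nil)
		r, okR := distmv.NewNormal([]float64{1, -1}, mat.NewSymDense(2, []float64{2, 0.3, 0.3, 1}), nil)
		if !okL || !okR {
			panic("covariance matrix not positive definite")
		}

		db := distmv.Bhattacharyya{}.DistNormal(l, r)
		h := distmv.Hellinger{}.DistNormal(l, r)

		// The two printed values should agree up to rounding.
		fmt.Println(h*h, 1-math.Exp(-db))
	}
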
// // For two normal distributions, the KL divergence is computed as -// D_KL(l || r) = 0.5*[ln(|Σ_r|) - ln(|Σ_l|) + (μ_l - μ_r)ᵀ*Σ_r^-1*(μ_l - μ_r) + tr(Σ_r^-1*Σ_l)-d] +// +// D_KL(l || r) = 0.5*[ln(|Σ_r|) - ln(|Σ_l|) + (μ_l - μ_r)ᵀ*Σ_r^-1*(μ_l - μ_r) + tr(Σ_r^-1*Σ_l)-d] func (KullbackLeibler) DistNormal(l, r *Normal) float64 { dim := l.Dim() if dim != r.Dim() { @@ -242,10 +260,14 @@ func (KullbackLeibler) DistUniform(l, r *Uniform) float64 { // Renyi is a type for computing the Rényi divergence of order α from l to r. // // The Rényi divergence with α > 0, α ≠ 1 is defined as -// D_α(l || r) = 1/(α-1) log(\int_-∞^∞ l(x)^α r(x)^(1-α)dx) +// +// D_α(l || r) = 1/(α-1) log(\int_-∞^∞ l(x)^α r(x)^(1-α)dx) +// // The Rényi divergence has special forms for α = 0 and α = 1. This type does // not implement α = ∞. For α = 0, -// D_0(l || r) = -log \int_-∞^∞ r(x)1{p(x)>0} dx +// +// D_0(l || r) = -log \int_-∞^∞ r(x)1{l(x)>0} dx +// // that is, the negative log probability under r(x) that l(x) > 0. // When α = 1, the Rényi divergence is equal to the Kullback-Leibler divergence. // The Rényi divergence is also equal to half the Bhattacharyya distance when α = 0.5. @@ -259,12 +281,15 @@ type Renyi struct { // The dimensions of the input distributions must match or DistNormal will panic. // // For two normal distributions, the Rényi divergence is computed as -// Σ_α = (1-α) Σ_l + αΣ_r -// D_α(l||r) = α/2 * (μ_l - μ_r)'*Σ_α^-1*(μ_l - μ_r) + 1/(2(α-1))*ln(|Σ_λ|/(|Σ_l|^(1-α)*|Σ_r|^α)) +// +// Σ_α = (1-α) Σ_l + αΣ_r +// D_α(l||r) = α/2 * (μ_l - μ_r)'*Σ_α^-1*(μ_l - μ_r) + 1/(2(α-1))*ln(|Σ_α|/(|Σ_l|^(1-α)*|Σ_r|^α)) // // For a more nicely formatted version of the formula, see Eq. 15 of -// Kolchinsky, Artemy, and Brendan D. Tracey. "Estimating Mixture Entropy -// with Pairwise Distances." arXiv preprint arXiv:1706.02419 (2017). +// +// Kolchinsky, Artemy, and Brendan D. Tracey. "Estimating Mixture Entropy +// with Pairwise Distances." arXiv preprint arXiv:1706.02419 (2017). +// // Note that this formula is for Chernoff divergence, which differs from // Rényi divergence by a factor of 1-α. Also be aware that most sources in // the literature report this formula incorrectly. @@ -312,18 +337,24 @@ func (renyi Renyi) DistNormal(l, r *Normal) float64 { // probability distributions. // // The Wasserstein distance is defined as -// W(l,r) := inf 𝔼(||X-Y||_2^2)^1/2 +// +// W(l,r) := inf 𝔼(||X-Y||_2^2)^1/2 +// // For more information, see -// https://en.wikipedia.org/wiki/Wasserstein_metric +// +// https://en.wikipedia.org/wiki/Wasserstein_metric type Wasserstein struct{} // DistNormal returns the Wasserstein distance between normal distributions l and r. // The dimensions of the input distributions must match or DistNormal will panic. // // The Wasserstein distance for Normal distributions is -// d^2 = ||m_l - m_r||_2^2 + Tr(Σ_l + Σ_r - 2(Σ_l^(1/2)*Σ_r*Σ_l^(1/2))^(1/2)) +// +// d^2 = ||m_l - m_r||_2^2 + Tr(Σ_l + Σ_r - 2(Σ_l^(1/2)*Σ_r*Σ_l^(1/2))^(1/2)) +// // For more information, see -// http://djalil.chafai.net/blog/2010/04/30/wasserstein-distance-between-two-gaussians/ +// +// http://djalil.chafai.net/blog/2010/04/30/wasserstein-distance-between-two-gaussians/ func (Wasserstein) DistNormal(l, r *Normal) float64 { dim := l.Dim() if dim != r.Dim() { diff --git a/stat/distmv/studentst.go b/stat/distmv/studentst.go index aefe3cdb..d6c23b44 100644 --- a/stat/distmv/studentst.go +++ b/stat/distmv/studentst.go @@ -19,8 +19,10 @@ import ( // StudentsT is a multivariate Student's T distribution.
It is a distribution over // ℝ^n with the probability density -// p(y) = (Γ((ν+n)/2) / Γ(ν/2)) * (νπ)^(-n/2) * |Ʃ|^(-1/2) * -// (1 + 1/ν * (y-μ)ᵀ * Ʃ^-1 * (y-μ))^(-(ν+n)/2) +// +// p(y) = (Γ((ν+n)/2) / Γ(ν/2)) * (νπ)^(-n/2) * |Ʃ|^(-1/2) * +// (1 + 1/ν * (y-μ)ᵀ * Ʃ^-1 * (y-μ))^(-(ν+n)/2) +// // where ν is a scalar greater than 2, μ is a vector in ℝ^n, and Ʃ is an n×n // symmetric positive definite matrix. // @@ -224,7 +226,9 @@ func findUnob(observed []int, dim int) (unobserved []int) { // CovarianceMatrix calculates the covariance matrix of the distribution, // storing the result in dst. Upon return, the value at element {i, j} of the // covariance matrix is equal to the covariance of the i^th and j^th variables. -// covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])] +// +// covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])] +// // If the dst matrix is empty it will be resized to the correct dimensions, // otherwise dst must match the dimension of the receiver or CovarianceMatrix // will panic. @@ -264,7 +268,9 @@ func (s *StudentsT) LogProb(y []float64) float64 { // MarginalStudentsT returns the marginal distribution of the given input variables, // and the success of the operation. // That is, MarginalStudentsT returns -// p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o +// +// p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o +// // where x_i are the dimensions in the input, and x_o are the remaining dimensions. // See https://en.wikipedia.org/wiki/Marginal_distribution for more information. // @@ -285,7 +291,9 @@ func (s *StudentsT) MarginalStudentsT(vars []int, src rand.Source) (dist *Studen // MarginalStudentsTSingle returns the marginal distribution of the given input variable. // That is, MarginalStudentsTSingle returns -// p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o +// +// p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o +// // where i is the input index, and x_o are the remaining dimensions. // See https://en.wikipedia.org/wiki/Marginal_distribution for more information. // diff --git a/stat/distuv/beta.go b/stat/distuv/beta.go index 8887ae1a..e72c26e8 100644 --- a/stat/distuv/beta.go +++ b/stat/distuv/beta.go @@ -16,7 +16,8 @@ import ( // with support between 0 and 1. // // The beta distribution has density function -// x^(α-1) * (1-x)^(β-1) * Γ(α+β) / (Γ(α)*Γ(β)) +// +// x^(α-1) * (1-x)^(β-1) * Γ(α+β) / (Γ(α)*Γ(β)) // // For more information, see https://en.wikipedia.org/wiki/Beta_distribution type Beta struct { diff --git a/stat/distuv/binomial.go b/stat/distuv/binomial.go index ab04430b..9a480140 100644 --- a/stat/distuv/binomial.go +++ b/stat/distuv/binomial.go @@ -17,7 +17,9 @@ import ( // that expresses the probability of a given number of successful Bernoulli trials // out of a total of n, each with success probability p. // The binomial distribution has the density function: -// f(k) = (n choose k) p^k (1-p)^(n-k) +// +// f(k) = (n choose k) p^k (1-p)^(n-k) +// // For more information, see https://en.wikipedia.org/wiki/Binomial_distribution. type Binomial struct { // N is the total number of Bernoulli trials. N must be greater than 0. diff --git a/stat/distuv/chi.go b/stat/distuv/chi.go index 82099d58..07efb284 100644 --- a/stat/distuv/chi.go +++ b/stat/distuv/chi.go @@ -16,7 +16,8 @@ import ( // with support on the positive numbers. // // The density function is given by -// 1/(2^{k/2-1} * Γ(k/2)) * x^{k - 1} * e^{-x^2/2} +// +// 1/(2^{k/2-1} * Γ(k/2)) * x^{k - 1} * e^{-x^2/2} // // For more information, see https://en.wikipedia.org/wiki/Chi_distribution. 
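Discrete densities like the binomial one documented above are usually evaluated in log space to avoid overflow in the factorials. A standalone sketch using math.Lgamma (independent of the distuv types):

	package main

	import (
		"fmt"
		"math"
	)

	// logChoose returns log(n choose k) via the log-gamma function.
	func logChoose(n, k float64) float64 {
		a, _ := math.Lgamma(n + 1)
		b, _ := math.Lgamma(k + 1)
		c, _ := math.Lgamma(n - k + 1)
		return a - b - c
	}

	func main() {
		n, p, k := 10.0, 0.3, 4.0
		// f(k) = (n choose k) p^k (1-p)^(n-k), evaluated in log space.
		logProb := logChoose(n, k) + k*math.Log(p) + (n-k)*math.Log(1-p)
		fmt.Println(math.Exp(logProb)) // ≈ 0.2001
	}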
type Chi struct { diff --git a/stat/distuv/chisquared.go b/stat/distuv/chisquared.go index ea48b39d..000e19b3 100644 --- a/stat/distuv/chisquared.go +++ b/stat/distuv/chisquared.go @@ -16,7 +16,9 @@ import ( // with support on the positive numbers. // // The density function is given by -// 1/(2^{k/2} * Γ(k/2)) * x^{k/2 - 1} * e^{-x/2} +// +// 1/(2^{k/2} * Γ(k/2)) * x^{k/2 - 1} * e^{-x/2} +// // It is a special case of the Gamma distribution, Γ(k/2, 1/2). // // For more information, see https://en.wikipedia.org/wiki/Chi-squared_distribution. diff --git a/stat/distuv/exponential.go b/stat/distuv/exponential.go index 09b19a68..9770c445 100644 --- a/stat/distuv/exponential.go +++ b/stat/distuv/exponential.go @@ -138,7 +138,9 @@ func (e Exponential) Rand() float64 { // Score returns the score function with respect to the parameters of the // distribution at the input location x. The score function is the derivative // of the log-likelihood at x with respect to the parameters -// (∂/∂θ) log(p(x;θ)) +// +// (∂/∂θ) log(p(x;θ)) +// // If deriv is non-nil, len(deriv) must equal the number of parameters otherwise // Score will panic, and the derivative is stored in-place into deriv. If deriv // is nil a new slice will be allocated and returned. @@ -148,7 +150,8 @@ func (e Exponential) Rand() float64 { // For more information, see https://en.wikipedia.org/wiki/Score_%28statistics%29. // // Special cases: -// Score(0) = [NaN] +// +// Score(0) = [NaN] func (e Exponential) Score(deriv []float64, x float64) []float64 { if deriv == nil { deriv = make([]float64, e.NumParameters()) @@ -171,9 +174,12 @@ func (e Exponential) Score(deriv []float64, x float64) []float64 { // ScoreInput returns the score function with respect to the input of the // distribution at the input location specified by x. The score function is the // derivative of the log-likelihood -// (d/dx) log(p(x)) . +// +// (d/dx) log(p(x)) . +// // Special cases: -// ScoreInput(0) = NaN +// +// ScoreInput(0) = NaN func (e Exponential) ScoreInput(x float64) float64 { if x > 0 { return -e.Rate diff --git a/stat/distuv/f.go b/stat/distuv/f.go index b9ae7856..e7e6434d 100644 --- a/stat/distuv/f.go +++ b/stat/distuv/f.go @@ -16,7 +16,9 @@ import ( // with support over the positive real numbers. // // The F-distribution has density function -// sqrt(((d1*x)^d1) * d2^d2 / ((d1*x+d2)^(d1+d2))) / (x * B(d1/2,d2/2)) +// +// sqrt(((d1*x)^d1) * d2^d2 / ((d1*x+d2)^(d1+d2))) / (x * B(d1/2,d2/2)) +// // where B is the beta function. // // For more information, see https://en.wikipedia.org/wiki/F-distribution diff --git a/stat/distuv/gamma.go b/stat/distuv/gamma.go index 0b893769..169c9507 100644 --- a/stat/distuv/gamma.go +++ b/stat/distuv/gamma.go @@ -16,7 +16,8 @@ import ( // with support over the positive real numbers. // // The gamma distribution has density function -// β^α / Γ(α) x^(α-1)e^(-βx) +// +// β^α / Γ(α) x^(α-1)e^(-βx) // // For more information, see https://en.wikipedia.org/wiki/Gamma_distribution type Gamma struct { diff --git a/stat/distuv/gumbel.go b/stat/distuv/gumbel.go index 260e4a69..1c25f4a1 100644 --- a/stat/distuv/gumbel.go +++ b/stat/distuv/gumbel.go @@ -15,8 +15,10 @@ import ( // Gumbel distribution is also sometimes known as the Extreme Value distribution. // // The right-skewed Gumbel distribution has density function -// 1/beta * exp(-(z + exp(-z))) -// z = (x - mu)/beta +// +// 1/beta * exp(-(z + exp(-z))) +// z = (x - mu)/beta +// // Beta must be greater than 0. 
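The Score and ScoreInput convention documented above for Exponential (and repeated for several distributions below) can be sanity-checked numerically. A standalone sketch comparing the analytic score in x of the exponential density p(x) = rate * e^(-rate*x) against a central finite difference (not the distuv API):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		rate := 2.5
		logProb := func(x float64) float64 { return math.Log(rate) - rate*x }

		x, h := 1.3, 1e-6
		numeric := (logProb(x+h) - logProb(x-h)) / (2 * h) // (d/dx) log(p(x))
		analytic := -rate                                  // the documented ScoreInput for x > 0

		fmt.Println(numeric, analytic)
	}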
// // For more information, see https://en.wikipedia.org/wiki/Gumbel_distribution. diff --git a/stat/distuv/inversegamma.go b/stat/distuv/inversegamma.go index a1089411..672ccc2a 100644 --- a/stat/distuv/inversegamma.go +++ b/stat/distuv/inversegamma.go @@ -18,7 +18,8 @@ import ( // of a gamma distributed random variable. // // The inverse gamma distribution has density function -// β^α / Γ(α) x^(-α-1)e^(-β/x) +// +// β^α / Γ(α) x^(-α-1)e^(-β/x) // // For more information, see https://en.wikipedia.org/wiki/Inverse-gamma_distribution type InverseGamma struct { diff --git a/stat/distuv/laplace.go b/stat/distuv/laplace.go index b75d0aa0..09fdc2e4 100644 --- a/stat/distuv/laplace.go +++ b/stat/distuv/laplace.go @@ -173,7 +173,9 @@ func (l Laplace) Rand() float64 { // Score returns the score function with respect to the parameters of the // distribution at the input location x. The score function is the derivative // of the log-likelihood at x with respect to the parameters -// (∂/∂θ) log(p(x;θ)) +// +// (∂/∂θ) log(p(x;θ)) +// // If deriv is non-nil, len(deriv) must equal the number of parameters otherwise // Score will panic, and the derivative is stored in-place into deriv. If deriv // is nil a new slice will be allocated and returned. @@ -183,7 +185,8 @@ func (l Laplace) Rand() float64 { // For more information, see https://en.wikipedia.org/wiki/Score_%28statistics%29. // // Special cases: -// Score(l.Mu) = [NaN, -1/l.Scale] +// +// Score(l.Mu) = [NaN, -1/l.Scale] func (l Laplace) Score(deriv []float64, x float64) []float64 { if deriv == nil { deriv = make([]float64, l.NumParameters()) @@ -208,9 +211,12 @@ func (l Laplace) Score(deriv []float64, x float64) []float64 { // ScoreInput returns the score function with respect to the input of the // distribution at the input location specified by x. The score function is the // derivative of the log-likelihood -// (d/dx) log(p(x)) . +// +// (d/dx) log(p(x)) . +// // Special cases: -// ScoreInput(l.Mu) = NaN +// +// ScoreInput(l.Mu) = NaN func (l Laplace) ScoreInput(x float64) float64 { diff := x - l.Mu if diff == 0 { diff --git a/stat/distuv/logistic.go b/stat/distuv/logistic.go index 4972122a..0392d6cc 100644 --- a/stat/distuv/logistic.go +++ b/stat/distuv/logistic.go @@ -12,8 +12,9 @@ import ( // Its cumulative distribution function is the logistic function. // // General form of probability density function for Logistic distribution is -// E(x) / (s * (1 + E(x))^2) -// where E(x) = exp(-(x-μ)/s) +// +// E(x) / (s * (1 + E(x))^2) +// where E(x) = exp(-(x-μ)/s) // // For more information, see https://en.wikipedia.org/wiki/Logistic_distribution. type Logistic struct { diff --git a/stat/distuv/lognormal.go b/stat/distuv/lognormal.go index f2ec023a..cbfd48a2 100644 --- a/stat/distuv/lognormal.go +++ b/stat/distuv/lognormal.go @@ -12,7 +12,8 @@ import ( // LogNormal represents a random variable whose log is normally distributed. // The probability density function is given by -// 1/(x σ √2π) exp(-(ln(x)-μ)^2)/(2σ^2)) +// +// 1/(x σ √2π) exp(-(ln(x)-μ)^2)/(2σ^2)) type LogNormal struct { Mu float64 Sigma float64 diff --git a/stat/distuv/norm.go b/stat/distuv/norm.go index ae7734e2..67614660 100644 --- a/stat/distuv/norm.go +++ b/stat/distuv/norm.go @@ -149,7 +149,9 @@ func (n Normal) Rand() float64 { // Score returns the score function with respect to the parameters of the // distribution at the input location x. 
The score function is the derivative // of the log-likelihood at x with respect to the parameters -// (∂/∂θ) log(p(x;θ)) +// +// (∂/∂θ) log(p(x;θ)) +// // If deriv is non-nil, len(deriv) must equal the number of parameters otherwise // Score will panic, and the derivative is stored in-place into deriv. If deriv // is nil a new slice will be allocated and returned. @@ -172,7 +174,8 @@ func (n Normal) Score(deriv []float64, x float64) []float64 { // ScoreInput returns the score function with respect to the input of the // distribution at the input location specified by x. The score function is the // derivative of the log-likelihood -// (d/dx) log(p(x)) . +// +// (d/dx) log(p(x)) . func (n Normal) ScoreInput(x float64) float64 { return -(1 / (2 * n.Sigma * n.Sigma)) * 2 * (x - n.Mu) } diff --git a/stat/distuv/pareto.go b/stat/distuv/pareto.go index b29abacb..c3f0aa1f 100644 --- a/stat/distuv/pareto.go +++ b/stat/distuv/pareto.go @@ -14,7 +14,8 @@ import ( // with support above the scale parameter. // // The density function is given by -// (α x_m^{α})/(x^{α+1}) for x >= x_m. +// +// (α x_m^{α})/(x^{α+1}) for x >= x_m. // // For more information, see https://en.wikipedia.org/wiki/Pareto_distribution. type Pareto struct { diff --git a/stat/distuv/poisson.go b/stat/distuv/poisson.go index cad75ede..34e9c436 100644 --- a/stat/distuv/poisson.go +++ b/stat/distuv/poisson.go @@ -16,7 +16,9 @@ import ( // that expresses the probability of a given number of events occurring in a fixed // interval. // The poisson distribution has density function: -// f(k) = λ^k / k! e^(-λ) +// +// f(k) = λ^k / k! e^(-λ) +// // For more information, see https://en.wikipedia.org/wiki/Poisson_distribution. type Poisson struct { // Lambda is the average number of events in an interval. diff --git a/stat/distuv/statdist.go b/stat/distuv/statdist.go index 91c61618..bf333db1 100644 --- a/stat/distuv/statdist.go +++ b/stat/distuv/statdist.go @@ -14,18 +14,25 @@ import ( // probability distributions. // // The Bhattacharyya distance is defined as -// D_B = -ln(BC(l,r)) -// BC = \int_-∞^∞ (p(x)q(x))^(1/2) dx +// +// D_B = -ln(BC(l,r)) +// BC = \int_-∞^∞ (p(x)q(x))^(1/2) dx +// // Where BC is known as the Bhattacharyya coefficient. // The Bhattacharyya distance is related to the Hellinger distance by -// H(l,r) = sqrt(1-BC(l,r)) +// +// H(l,r) = sqrt(1-BC(l,r)) +// // For more information, see -// https://en.wikipedia.org/wiki/Bhattacharyya_distance +// +// https://en.wikipedia.org/wiki/Bhattacharyya_distance type Bhattacharyya struct{} // DistBeta returns the Bhattacharyya distance between Beta distributions l and r. // For Beta distributions, the Bhattacharyya distance is given by -// -ln(B((α_l + α_r)/2, (β_l + β_r)/2) / (B(α_l,β_l), B(α_r,β_r))) +// +// -ln(B((α_l + α_r)/2, (β_l + β_r)/2) / (B(α_l,β_l)*B(α_r,β_r))^(1/2)) +// // Where B is the Beta function. func (Bhattacharyya) DistBeta(l, r Beta) float64 { // Reference: https://en.wikipedia.org/wiki/Hellinger_distance#Examples @@ -35,8 +42,9 @@ func (Bhattacharyya) DistBeta(l, r Beta) float64 { // DistNormal returns the Bhattacharyya distance between Normal distributions l and r.
// For Normal distributions, the Bhattacharyya distance is given by -// s = (σ_l^2 + σ_r^2)/2 -// BC = 1/8 (μ_l-μ_r)^2/s + 1/2 ln(s/(σ_l*σ_r)) +// +// s = (σ_l^2 + σ_r^2)/2 +// D_B = 1/8 (μ_l-μ_r)^2/s + 1/2 ln(s/(σ_l*σ_r)) func (Bhattacharyya) DistNormal(l, r Normal) float64 { // Reference: https://en.wikipedia.org/wiki/Bhattacharyya_distance m := l.Mu - r.Mu @@ -48,13 +56,18 @@ func (Bhattacharyya) DistNormal(l, r Normal) float64 { // distributions. // // The Hellinger distance is defined as -// H^2(l,r) = 1/2 * int_x (\sqrt(l(x)) - \sqrt(r(x)))^2 dx +// +// H^2(l,r) = 1/2 * int_x (\sqrt(l(x)) - \sqrt(r(x)))^2 dx +// // and is bounded between 0 and 1. Note the above formula defines the squared // Hellinger distance, while this returns the Hellinger distance itself. // The Hellinger distance is related to the Bhattacharyya distance by -// H^2 = 1 - exp(-D_B) +// +// H^2 = 1 - exp(-D_B) +// // For more information, see -// https://en.wikipedia.org/wiki/Hellinger_distance +// +// https://en.wikipedia.org/wiki/Hellinger_distance type Hellinger struct{} // DistBeta computes the Hellinger distance between Beta distributions l and r. @@ -74,7 +87,9 @@ func (Hellinger) DistNormal(l, r Normal) float64 { // KullbackLeibler is a type for computing the Kullback-Leibler divergence from l to r. // // The Kullback-Leibler divergence is defined as -// D_KL(l || r ) = \int_x p(x) log(p(x)/q(x)) dx +// +// D_KL(l || r ) = \int_x p(x) log(p(x)/q(x)) dx +// // Note that the Kullback-Leibler divergence is not symmetric with respect to // the order of the input arguments. type KullbackLeibler struct{} @@ -83,9 +98,11 @@ type KullbackLeibler struct{} // l and r. // // For two Beta distributions, the KL divergence is computed as -// D_KL(l || r) = log Γ(α_l+β_l) - log Γ(α_l) - log Γ(β_l) -// - log Γ(α_r+β_r) + log Γ(α_r) + log Γ(β_r) -// + (α_l-α_r)(ψ(α_l)-ψ(α_l+β_l)) + (β_l-β_r)(ψ(β_l)-ψ(α_l+β_l)) +// +// D_KL(l || r) = log Γ(α_l+β_l) - log Γ(α_l) - log Γ(β_l) +// - log Γ(α_r+β_r) + log Γ(α_r) + log Γ(β_r) +// + (α_l-α_r)(ψ(α_l)-ψ(α_l+β_l)) + (β_l-β_r)(ψ(β_l)-ψ(α_l+β_l)) +// // Where Γ is the gamma function and ψ is the digamma function. func (KullbackLeibler) DistBeta(l, r Beta) float64 { // http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/ @@ -116,7 +133,8 @@ func (KullbackLeibler) DistBeta(l, r Beta) float64 { // l and r. // // For two Normal distributions, the KL divergence is computed as -// D_KL(l || r) = log(σ_r / σ_l) + (σ_l^2 + (μ_l-μ_r)^2)/(2 * σ_r^2) - 0.5 +// +// D_KL(l || r) = log(σ_r / σ_l) + (σ_l^2 + (μ_l-μ_r)^2)/(2 * σ_r^2) - 0.5 func (KullbackLeibler) DistNormal(l, r Normal) float64 { d := l.Mu - r.Mu v := (l.Sigma*l.Sigma + d*d) / (2 * r.Sigma * r.Sigma) diff --git a/stat/distuv/studentst.go b/stat/distuv/studentst.go index 89624624..26d305c2 100644 --- a/stat/distuv/studentst.go +++ b/stat/distuv/studentst.go @@ -18,7 +18,8 @@ const logPi = 1.1447298858494001741 // http://oeis.org/A053510 // over the real numbers. // // The Student's T distribution has density function -// Γ((ν+1)/2) / (sqrt(νπ) Γ(ν/2) σ) (1 + 1/ν * ((x-μ)/σ)^2)^(-(ν+1)/2) +// +// Γ((ν+1)/2) / (sqrt(νπ) Γ(ν/2) σ) (1 + 1/ν * ((x-μ)/σ)^2)^(-(ν+1)/2) // // The Student's T distribution approaches the normal distribution as ν → ∞.
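For univariate normals the Bhattacharyya and Hellinger distances above are cheap to evaluate directly. A standalone numeric sketch of the documented formulas (plain float64, not the distuv types):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// Two univariate normals (mu, sigma).
		muL, sigmaL := 0.0, 1.0
		muR, sigmaR := 1.0, 2.0

		// s = (σ_l^2 + σ_r^2)/2
		// D_B = 1/8 (μ_l-μ_r)^2/s + 1/2 ln(s/(σ_l*σ_r))
		s := (sigmaL*sigmaL + sigmaR*sigmaR) / 2
		d := muL - muR
		db := d*d/(8*s) + 0.5*math.Log(s/(sigmaL*sigmaR))

		// H = sqrt(1 - exp(-D_B))
		h := math.Sqrt(1 - math.Exp(-db))

		fmt.Println(db, h)
	}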
// diff --git a/stat/distuv/triangle.go b/stat/distuv/triangle.go index 3bf44bee..76e1227e 100644 --- a/stat/distuv/triangle.go +++ b/stat/distuv/triangle.go @@ -137,7 +137,9 @@ func (t Triangle) Rand() float64 { // Score returns the score function with respect to the parameters of the // distribution at the input location x. The score function is the derivative // of the log-likelihood at x with respect to the parameters -// (∂/∂θ) log(p(x;θ)) +// +// (∂/∂θ) log(p(x;θ)) +// // If deriv is non-nil, len(deriv) must equal the number of parameters otherwise // Score will panic, and the derivative is stored in-place into deriv. If deriv // is nil a new slice will be allocated and returned. @@ -197,10 +199,13 @@ func (t Triangle) Score(deriv []float64, x float64) []float64 { // ScoreInput returns the score function with respect to the input of the // distribution at the input location specified by x. The score function is the // derivative of the log-likelihood -// (d/dx) log(p(x)) . +// +// (d/dx) log(p(x)) . +// // Special cases (c is the mode of the distribution): -// ScoreInput(c) = NaN -// ScoreInput(x) = NaN for x not in (a, b) +// +// ScoreInput(c) = NaN +// ScoreInput(x) = NaN for x not in (a, b) func (t Triangle) ScoreInput(x float64) float64 { if (x <= t.a) || (x >= t.b) || (x == t.c) { return math.NaN() diff --git a/stat/distuv/uniform.go b/stat/distuv/uniform.go index a2d2b9e4..01415389 100644 --- a/stat/distuv/uniform.go +++ b/stat/distuv/uniform.go @@ -123,7 +123,9 @@ func (u Uniform) Rand() float64 { // Score returns the score function with respect to the parameters of the // distribution at the input location x. The score function is the derivative // of the log-likelihood at x with respect to the parameters -// (∂/∂θ) log(p(x;θ)) +// +// (∂/∂θ) log(p(x;θ)) +// // If deriv is non-nil, len(deriv) must equal the number of parameters otherwise // Score will panic, and the derivative is stored in-place into deriv. If deriv // is nil a new slice will be allocated and returned. @@ -157,7 +159,8 @@ func (u Uniform) Score(deriv []float64, x float64) []float64 { // ScoreInput returns the score function with respect to the input of the // distribution at the input location specified by x. The score function is the // derivative of the log-likelihood -// (d/dx) log(p(x)) . +// +// (d/dx) log(p(x)) . func (u Uniform) ScoreInput(x float64) float64 { if (x <= u.Min) || (x >= u.Max) { return math.NaN() diff --git a/stat/distuv/weibull.go b/stat/distuv/weibull.go index 4cd8dd8b..bbf49271 100644 --- a/stat/distuv/weibull.go +++ b/stat/distuv/weibull.go @@ -50,9 +50,10 @@ func (w Weibull) gammaIPow(i, pow float64) float64 { // // Special cases occur when x == 0, and the result depends on the shape // parameter as follows: -// If 0 < K < 1, LogProb returns +Inf. -// If K == 1, LogProb returns 0. -// If K > 1, LogProb returns -Inf. +// +// If 0 < K < 1, LogProb returns +Inf. +// If K == 1, LogProb returns 0. +// If K > 1, LogProb returns -Inf. func (w Weibull) LogProb(x float64) float64 { if x < 0 { return math.Inf(-1) @@ -127,7 +128,9 @@ func (w Weibull) Rand() float64 { // Score returns the score function with respect to the parameters of the // distribution at the input location x. The score function is the derivative // of the log-likelihood at x with respect to the parameters -// (∂/∂θ) log(p(x;θ)) +// +// (∂/∂θ) log(p(x;θ)) +// // If deriv is non-nil, len(deriv) must equal the number of parameters otherwise // Score will panic, and the derivative is stored in-place into deriv. 
If deriv // is nil a new slice will be allocated and returned. @@ -137,7 +140,8 @@ func (w Weibull) Rand() float64 { // For more information, see https://en.wikipedia.org/wiki/Score_%28statistics%29. // // Special cases: -// Score(x) = [NaN, NaN] for x <= 0 +// +// Score(x) = [NaN, NaN] for x <= 0 func (w Weibull) Score(deriv []float64, x float64) []float64 { if deriv == nil { deriv = make([]float64, w.NumParameters()) @@ -158,10 +162,12 @@ func (w Weibull) Score(deriv []float64, x float64) []float64 { // ScoreInput returns the score function with respect to the input of the // distribution at the input location specified by x. The score function is the // derivative of the log-likelihood -// (d/dx) log(p(x)) . +// +// (d/dx) log(p(x)) . // // Special cases: -// ScoreInput(x) = NaN for x <= 0 +// +// ScoreInput(x) = NaN for x <= 0 func (w Weibull) ScoreInput(x float64) float64 { if x > 0 { return (-w.K*math.Pow(x/w.Lambda, w.K) + w.K - 1) / x diff --git a/stat/roc.go b/stat/roc.go index be6c4a80..05c6b44d 100644 --- a/stat/roc.go +++ b/stat/roc.go @@ -139,7 +139,9 @@ func ROC(cutoffs, y []float64, classes []bool, weights []float64) (tpr, fpr, thr // The returned ntp values can be interpreted as the number of true positives // where values above the given rank are assigned class true for each given // rank from 1 to len(classes). -// ntp_i = sum_{j ≥ len(ntp)-1 - i} [ classes_j ] * weights_j, where [x] = 1 if x else 0. +// +// ntp_i = sum_{j ≥ len(ntp)-1 - i} [ classes_j ] * weights_j, where [x] = 1 if x else 0. +// // The values of min and max provide the minimum and maximum possible number // of false values for the set of classes. The first element of ntp, min and // max are always zero as this corresponds to assigning all data class false diff --git a/stat/samplemv/metropolishastings.go b/stat/samplemv/metropolishastings.go index fa11e786..e784fc93 100644 --- a/stat/samplemv/metropolishastings.go +++ b/stat/samplemv/metropolishastings.go @@ -41,7 +41,9 @@ type MHProposal interface { // chain implicitly defined by the proposal distribution. At each // iteration, a proposal point is generated randomly from the current location. // This proposal point is accepted with probability -// p = min(1, (target(new) * proposal(current|new)) / (target(current) * proposal(new|current))) +// +// p = min(1, (target(new) * proposal(current|new)) / (target(current) * proposal(new|current))) +// // If the new location is accepted, it becomes the new current location. // If it is rejected, the current location remains. This is the sample stored in // batch, ignoring BurnIn and Rate (discussed below). @@ -186,7 +188,9 @@ func NewProposalNormal(sigma *mat.SymDense, src rand.Source) (*ProposalNormal, b // ConditionalLogProb returns the probability of the first argument conditioned on // being at the second argument. -// p(x|y) +// +// p(x|y) +// // ConditionalLogProb panics if the input slices are not the same length or // are not equal to the dimension of the covariance matrix. func (p *ProposalNormal) ConditionalLogProb(x, y []float64) (prob float64) { diff --git a/stat/samplemv/samplemv.go b/stat/samplemv/samplemv.go index be85de90..3b65e5bd 100644 --- a/stat/samplemv/samplemv.go +++ b/stat/samplemv/samplemv.go @@ -158,7 +158,9 @@ var ErrRejection = errors.New("rejection: acceptance ratio above 1") // Rejection sampling generates points from the target distribution by using // the proposal distribution. 
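The Metropolis-Hastings acceptance probability documented above simplifies when the proposal is symmetric, since the proposal terms cancel in the ratio. A minimal standalone sketch of a few update steps with an unnormalized standard normal target (not the samplemv API):

	package main

	import (
		"fmt"
		"math"
		"math/rand"
	)

	func main() {
		// Unnormalized target density.
		target := func(x float64) float64 { return math.Exp(-x * x / 2) }

		current := 0.0
		for i := 0; i < 5; i++ {
			// Symmetric random-walk proposal.
			proposed := current + rand.NormFloat64()

			// p = min(1, target(new)/target(current)) for a symmetric proposal.
			p := math.Min(1, target(proposed)/target(current))
			if rand.Float64() < p {
				current = proposed
			}
			fmt.Println(current)
		}
	}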
At each step of the algorithm, the proposed point // is accepted with probability -// p = target(x) / (proposal(x) * c) +// +// p = target(x) / (proposal(x) * c) +// // where target(x) is the probability of the point according to the target distribution // and proposal(x) is the probability according to the proposal distribution. // The constant c must be chosen such that target(x) < proposal(x) * c for all x. diff --git a/stat/sampleuv/sample.go b/stat/sampleuv/sample.go index 36e84223..a8505290 100644 --- a/stat/sampleuv/sample.go +++ b/stat/sampleuv/sample.go @@ -147,7 +147,9 @@ var ErrRejection = errors.New("rejection: acceptance ratio above 1") // Rejection sampling generates points from the target distribution by using // the proposal distribution. At each step of the algorithm, the proposed point // is accepted with probability -// p = target(x) / (proposal(x) * c) +// +// p = target(x) / (proposal(x) * c) +// // where target(x) is the probability of the point according to the target distribution // and proposal(x) is the probability according to the proposal distribution. // The constant c must be chosen such that target(x) < proposal(x) * c for all x. @@ -254,7 +256,9 @@ type MHProposal interface { // chain implicitly defined by the proposal distribution. At each // iteration, a proposal point is generated randomly from the current location. // This proposal point is accepted with probability -// p = min(1, (target(new) * proposal(current|new)) / (target(current) * proposal(new|current))) +// +// p = min(1, (target(new) * proposal(current|new)) / (target(current) * proposal(new|current))) +// // If the new location is accepted, it becomes the new current location. // If it is rejected, the current location remains. This is the sample stored in // batch, ignoring BurnIn and Rate (discussed below). diff --git a/stat/spatial/spatial.go b/stat/spatial/spatial.go index 994c085a..ca641b59 100644 --- a/stat/spatial/spatial.go +++ b/stat/spatial/spatial.go @@ -16,12 +16,12 @@ import ( // GetisOrdGStar returns the Local Getis-Ord G*i statistic for element of the // weighted data using the provided locality matrix. The returned value is a z-score. // -// G^*_i = num_i / den_i +// G^*_i = num_i / den_i // -// num_i = \sum_j (w_{ij} x_j) - \bar X \sum_j w_{ij} -// den_i = S \sqrt(((n \sum_j w_{ij}^2 - (\sum_j w_{ij})^2))/(n - 1)) -// \bar X = (\sum_j x_j) / n -// S = \sqrt((\sum_j x_j^2)/n - (\bar X)^2) +// num_i = \sum_j (w_{ij} x_j) - \bar X \sum_j w_{ij} +// den_i = S \sqrt(((n \sum_j w_{ij}^2 - (\sum_j w_{ij})^2))/(n - 1)) +// \bar X = (\sum_j x_j) / n +// S = \sqrt((\sum_j x_j^2)/n - (\bar X)^2) // // GetisOrdGStar will panic if locality is not a square matrix with dimensions the // same as the length of data or if i is not a valid index into data. diff --git a/stat/stat.go b/stat/stat.go index 6649e0a6..f7d43726 100644 --- a/stat/stat.go +++ b/stat/stat.go @@ -25,7 +25,8 @@ const ( ) // bhattacharyyaCoeff computes the Bhattacharyya Coefficient for probability distributions given by: -// \sum_i \sqrt{p_i q_i} +// +// \sum_i \sqrt{p_i q_i} // // It is assumed that p and q have equal length. func bhattacharyyaCoeff(p, q []float64) float64 { @@ -37,7 +38,8 @@ func bhattacharyyaCoeff(p, q []float64) float64 { } // Bhattacharyya computes the distance between the probability distributions p and q given by: -// -\ln ( \sum_i \sqrt{p_i q_i} ) +// +// -\ln ( \sum_i \sqrt{p_i q_i} ) // // The lengths of p and q must be equal. It is assumed that p and q sum to 1. 
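The rejection sampling acceptance rule documented above, p = target(x) / (proposal(x) * c), can be demonstrated end to end. A standalone sketch with an unnormalized half-normal target, an Exp(1) proposal, and c = e^(1/2), chosen so that target(x) <= proposal(x)*c for all x >= 0 (these particular choices are for illustration only):

	package main

	import (
		"fmt"
		"math"
		"math/rand"
	)

	func main() {
		target := func(x float64) float64 { return math.Exp(-x * x / 2) } // unnormalized half-normal
		proposal := func(x float64) float64 { return math.Exp(-x) }       // Exp(1) density
		c := math.Exp(0.5)

		var samples []float64
		for len(samples) < 5 {
			x := rand.ExpFloat64() // draw from the proposal
			// Accept with probability target(x) / (proposal(x) * c).
			if rand.Float64() < target(x)/(proposal(x)*c) {
				samples = append(samples, x)
			}
		}
		fmt.Println(samples)
	}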
func Bhattacharyya(p, q []float64) float64 { @@ -59,8 +61,8 @@ func Bhattacharyya(p, q []float64) float64 { // CDF will panic if the length of x is zero. // // CumulantKind behaviors: -// - Empirical: Returns the lowest fraction for which q is greater than or equal -// to that fraction of samples +// - Empirical: Returns the lowest fraction for which q is greater than or equal +// to that fraction of samples func CDF(q float64, c CumulantKind, x, weights []float64) float64 { if weights != nil && len(x) != len(weights) { panic("stat: slice length mismatch") @@ -112,7 +114,8 @@ func CDF(q float64, c CumulantKind, x, weights []float64) float64 { // ChiSquare computes the chi-square distance between the observed frequencies 'obs' and // expected frequencies 'exp' given by: -// \sum_i (obs_i-exp_i)^2 / exp_i +// +// \sum_i (obs_i-exp_i)^2 / exp_i // // The lengths of obs and exp must be equal. func ChiSquare(obs, exp []float64) float64 { @@ -131,7 +134,9 @@ func ChiSquare(obs, exp []float64) float64 { } // CircularMean returns the circular mean of the dataset. +// // atan2(\sum_i w_i * sin(alpha_i), \sum_i w_i * cos(alpha_i)) +// // If weights is nil then all of the weights are 1. If weights is not nil, then // len(x) must equal len(weights). func CircularMean(x, weights []float64) float64 { @@ -157,7 +162,9 @@ func CircularMean(x, weights []float64) float64 { // Correlation returns the weighted correlation between the samples of x and y // with the given means. -// sum_i {w_i (x_i - meanX) * (y_i - meanY)} / (stdX * stdY) +// +// sum_i {w_i (x_i - meanX) * (y_i - meanY)} / (stdX * stdY) +// // The lengths of x and y must be equal. If weights is nil then all of the // weights are 1. If weights is not nil, then len(x) must equal len(weights). func Correlation(x, y, weights []float64) float64 { @@ -277,7 +284,9 @@ func Kendall(x, y, weights []float64) float64 { } // Covariance returns the weighted covariance between the samples of x and y. -// sum_i {w_i (x_i - meanX) * (y_i - meanY)} / (sum_j {w_j} - 1) +// +// sum_i {w_i (x_i - meanX) * (y_i - meanY)} / (sum_j {w_j} - 1) +// // The lengths of x and y must be equal. If weights is nil then all of the // weights are 1. If weights is not nil, then len(x) must equal len(weights). func Covariance(x, y, weights []float64) float64 { @@ -353,7 +362,7 @@ func CrossEntropy(p, q []float64) float64 { // Entropy computes the Shannon entropy of a distribution or the distance between // two distributions. The natural logarithm is used. -// - sum_i (p_i * log_e(p_i)) +// - sum_i (p_i * log_e(p_i)) func Entropy(p []float64) float64 { var e float64 for _, v := range p { @@ -402,7 +411,9 @@ func kurtosisCorrection(n float64) (mul, offset float64) { } // GeometricMean returns the weighted geometric mean of the dataset -// \prod_i {x_i ^ w_i} +// +// \prod_i {x_i ^ w_i} +// // This only applies with positive x and positive weights. If weights is nil // then all of the weights are 1. If weights is not nil, then len(x) must equal // len(weights). @@ -431,7 +442,9 @@ func GeometricMean(x, weights []float64) float64 { } // HarmonicMean returns the weighted harmonic mean of the dataset -// \sum_i {w_i} / ( sum_i {w_i / x_i} ) +// +// \sum_i {w_i} / ( sum_i {w_i / x_i} ) +// // This only applies with positive x and positive weights. // If weights is nil then all of the weights are 1. If weights is not nil, then // len(x) must equal len(weights). 
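A short usage sketch of the mean helpers (GeometricMean and HarmonicMean are documented above, Mean further below), assuming the gonum.org/v1/gonum/stat import path; nil weights mean unit weights:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/stat"
	)

	func main() {
		x := []float64{1, 2, 4, 8}

		fmt.Println(stat.Mean(x, nil))          // 3.75
		fmt.Println(stat.GeometricMean(x, nil)) // (1*2*4*8)^(1/4) ≈ 2.83
		fmt.Println(stat.HarmonicMean(x, nil))  // 4 / (1 + 1/2 + 1/4 + 1/8) ≈ 2.13
	}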
@@ -463,7 +476,8 @@ func HarmonicMean(x, weights []float64) float64 { } // Hellinger computes the distance between the probability distributions p and q given by: -// \sqrt{ 1 - \sum_i \sqrt{p_i q_i} } +// +// \sqrt{ 1 - \sum_i \sqrt{p_i q_i} } // // The lengths of p and q must be equal. It is assumed that p and q sum to 1. func Hellinger(p, q []float64) float64 { @@ -480,11 +494,11 @@ func Hellinger(p, q []float64) float64 { // with bin creation. // // The following conditions on the inputs apply: -// - The count variable must either be nil or have length of one less than dividers. -// - The values in dividers must be sorted (use the sort package). -// - The x values must be sorted. -// - If weights is nil then all of the weights are 1. -// - If weights is not nil, then len(x) must equal len(weights). +// - The count variable must either be nil or have length of one less than dividers. +// - The values in dividers must be sorted (use the sort package). +// - The x values must be sorted. +// - If weights is nil then all of the weights are 1. +// - If weights is not nil, then len(x) must equal len(weights). func Histogram(count, dividers, x, weights []float64) []float64 { if weights != nil && len(x) != len(weights) { panic("stat: slice length mismatch") @@ -560,8 +574,10 @@ func Histogram(count, dividers, x, weights []float64) []float64 { // JensenShannon computes the JensenShannon divergence between the distributions // p and q. The Jensen-Shannon divergence is defined as -// m = 0.5 * (p + q) -// JS(p, q) = 0.5 ( KL(p, m) + KL(q, m) ) +// +// m = 0.5 * (p + q) +// JS(p, q) = 0.5 ( KL(p, m) + KL(q, m) ) +// // Unlike Kullback-Leibler, the Jensen-Shannon distance is symmetric. The value // is between 0 and ln(2). func JensenShannon(p, q []float64) float64 { @@ -592,8 +608,9 @@ func JensenShannon(p, q []float64) float64 { // len(y) must equal len(yWeights). Both x and y must be sorted. // // Special cases are: -// = 0 if len(x) == len(y) == 0 -// = 1 if len(x) == 0, len(y) != 0 or len(x) != 0 and len(y) == 0 +// +// = 0 if len(x) == len(y) == 0 +// = 1 if len(x) == 0, len(y) != 0 or len(x) != 0 and len(y) == 0 func KolmogorovSmirnov(x, xWeights, y, yWeights []float64) float64 { if xWeights != nil && len(x) != len(xWeights) { panic("stat: slice length mismatch") @@ -722,7 +739,9 @@ func updateKS(idx int, cdf, sum float64, values, weights []float64, isNil bool) // KullbackLeibler computes the Kullback-Leibler distance between the // distributions p and q. The natural logarithm is used. -// sum_i(p_i * log(p_i / q_i)) +// +// sum_i(p_i * log(p_i / q_i)) +// // Note that the Kullback-Leibler distance is not symmetric; // KullbackLeibler(p,q) != KullbackLeibler(q,p) func KullbackLeibler(p, q []float64) float64 { @@ -739,13 +758,17 @@ func KullbackLeibler(p, q []float64) float64 { } // LinearRegression computes the best-fit line -// y = alpha + beta*x +// +// y = alpha + beta*x +// // to the data in x and y with the given weights. If origin is true, the // regression is forced to pass through the origin. // // Specifically, LinearRegression computes the values of alpha and // beta such that the total residual -// \sum_i w[i]*(y[i] - alpha - beta*x[i])^2 +// +// \sum_i w[i]*(y[i] - alpha - beta*x[i])^2 +// // is minimized. If origin is true, then alpha is forced to be zero. // // The lengths of x and y must be equal. 
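The Jensen-Shannon construction documented above is straightforward to compute for discrete distributions. A small standalone sketch (plain slices, natural logarithm):

	package main

	import (
		"fmt"
		"math"
	)

	// kl computes sum_i p_i * log(p_i / q_i).
	func kl(p, q []float64) float64 {
		var d float64
		for i := range p {
			d += p[i] * math.Log(p[i]/q[i])
		}
		return d
	}

	func main() {
		p := []float64{0.1, 0.6, 0.3}
		q := []float64{0.3, 0.3, 0.4}

		// m = 0.5 * (p + q)
		m := make([]float64, len(p))
		for i := range p {
			m[i] = 0.5 * (p[i] + q[i])
		}

		// JS(p, q) = 0.5 * (KL(p, m) + KL(q, m)), symmetric and bounded by ln(2).
		fmt.Println(0.5 * (kl(p, m) + kl(q, m)))
	}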
If weights is nil then all of the @@ -783,9 +806,13 @@ func LinearRegression(x, y, weights []float64, origin bool) (alpha, beta float64 } // RSquared returns the coefficient of determination defined as -// R^2 = 1 - \sum_i w[i]*(y[i] - alpha - beta*x[i])^2 / \sum_i w[i]*(y[i] - mean(y))^2 +// +// R^2 = 1 - \sum_i w[i]*(y[i] - alpha - beta*x[i])^2 / \sum_i w[i]*(y[i] - mean(y))^2 +// // for the line -// y = alpha + beta*x +// +// y = alpha + beta*x +// // and the data in x and y with the given weights. // // The lengths of x and y must be equal. If weights is nil then all of the @@ -816,7 +843,9 @@ func RSquared(x, y, weights []float64, alpha, beta float64) float64 { } // RSquaredFrom returns the coefficient of determination defined as -// R^2 = 1 - \sum_i w[i]*(estimate[i] - value[i])^2 / \sum_i w[i]*(value[i] - mean(values))^2 +// +// R^2 = 1 - \sum_i w[i]*(estimate[i] - value[i])^2 / \sum_i w[i]*(value[i] - mean(values))^2 +// // and the data in estimates and values with the given weights. // // The lengths of estimates and values must be equal. If weights is nil then @@ -846,9 +875,13 @@ func RSquaredFrom(estimates, values, weights []float64) float64 { } // RNoughtSquared returns the coefficient of determination defined as -// R₀^2 = \sum_i w[i]*(beta*x[i])^2 / \sum_i w[i]*y[i]^2 +// +// R₀^2 = \sum_i w[i]*(beta*x[i])^2 / \sum_i w[i]*y[i]^2 +// // for the line -// y = beta*x +// +// y = beta*x +// // and the data in x and y with the given weights. RNoughtSquared should // only be used for best-fit lines regressed through the origin. // @@ -877,7 +910,9 @@ func RNoughtSquared(x, y, weights []float64, beta float64) float64 { } // Mean computes the weighted mean of the data set. -// sum_i {w_i * x_i} / sum_i {w_i} +// +// sum_i {w_i * x_i} / sum_i {w_i} +// // If weights is nil then all of the weights are 1. If weights is not nil, then // len(x) must equal len(weights). func Mean(x, weights []float64) float64 { @@ -932,7 +967,9 @@ func Mode(x, weights []float64) (val float64, count float64) { } // BivariateMoment computes the weighted mixed moment between the samples x and y. -// E[(x - μ_x)^r*(y - μ_y)^s] +// +// E[(x - μ_x)^r*(y - μ_y)^s] +// // No degrees of freedom correction is done. // The lengths of x and y must be equal. If weights is nil then all of the // weights are 1. If weights is not nil, then len(x) must equal len(weights). @@ -967,7 +1004,9 @@ func BivariateMoment(r, s float64, x, y, weights []float64) float64 { } // Moment computes the weighted n^th moment of the samples, -// E[(x - μ)^N] +// +// E[(x - μ)^N] +// // No degrees of freedom correction is done. // If weights is nil then all of the weights are 1. If weights is not nil, then // len(x) must equal len(weights). @@ -995,7 +1034,9 @@ func Moment(moment float64, x, weights []float64) float64 { // MomentAbout computes the weighted n^th weighted moment of the samples about // the given mean \mu, -// E[(x - μ)^N] +// +// E[(x - μ)^N] +// // No degrees of freedom correction is done. // If weights is nil then all of the weights are 1. If weights is not nil, then // len(x) must equal len(weights). @@ -1033,9 +1074,9 @@ func MomentAbout(moment float64, x []float64, mean float64, weights []float64) f // Quantile will panic if the length of x is zero. 
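A short usage sketch of LinearRegression and RSquared as documented above, assuming the gonum.org/v1/gonum/stat import path (nil weights mean unit weights):

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/stat"
	)

	func main() {
		x := []float64{1, 2, 3, 4, 5}
		y := []float64{2.1, 3.9, 6.2, 8.1, 9.8}

		// Fit y = alpha + beta*x; origin=false keeps the intercept free.
		alpha, beta := stat.LinearRegression(x, y, nil, false)

		// Coefficient of determination of the fitted line.
		r2 := stat.RSquared(x, y, nil, alpha, beta)

		fmt.Println(alpha, beta, r2)
	}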
// // CumulantKind behaviors: -// - Empirical: Returns the lowest value q for which q is greater than or equal -// to the fraction p of samples -// - LinInterp: Returns the linearly interpolated value +// - Empirical: Returns the lowest value q for which q is greater than or equal +// to the fraction p of samples +// - LinInterp: Returns the linearly interpolated value func Quantile(p float64, c CumulantKind, x, weights []float64) float64 { if !(p >= 0 && p <= 1) { panic("stat: percentile out of bounds") @@ -1246,13 +1287,16 @@ func StdErr(std, sampleSize float64) float64 { // StdScore returns the standard score (a.k.a. z-score, z-value) for the value x // with the given mean and standard deviation, i.e. -// (x - mean) / std +// +// (x - mean) / std func StdScore(x, mean, std float64) float64 { return (x - mean) / std } // Variance computes the unbiased weighted sample variance: -// \sum_i w_i (x_i - mean)^2 / (sum_i w_i - 1) +// +// \sum_i w_i (x_i - mean)^2 / (sum_i w_i - 1) +// // If weights is nil then all of the weights are 1. If weights is not nil, then // len(x) must equal len(weights). // When weights sum to 1 or less, a biased variance estimator should be used. @@ -1262,8 +1306,10 @@ func Variance(x, weights []float64) float64 { } // MeanVariance computes the sample mean and unbiased variance, where the mean and variance are -// \sum_i w_i * x_i / (sum_i w_i) -// \sum_i w_i (x_i - mean)^2 / (sum_i w_i - 1) +// +// \sum_i w_i * x_i / (sum_i w_i) +// \sum_i w_i (x_i - mean)^2 / (sum_i w_i - 1) +// // respectively. // If weights is nil then all of the weights are 1. If weights is not nil, then // len(x) must equal len(weights). @@ -1279,8 +1325,10 @@ func MeanVariance(x, weights []float64) (mean, variance float64) { // PopMeanVariance computes the sample mean and biased variance (also known as // "population variance"), where the mean and variance are -// \sum_i w_i * x_i / (sum_i w_i) -// \sum_i w_i (x_i - mean)^2 / (sum_i w_i) +// +// \sum_i w_i * x_i / (sum_i w_i) +// \sum_i w_i (x_i - mean)^2 / (sum_i w_i) +// // respectively. // If weights is nil then all of the weights are 1. If weights is not nil, then // len(x) must equal len(weights). @@ -1308,7 +1356,9 @@ func PopStdDev(x, weights []float64) float64 { } // PopVariance computes the biased weighted sample variance: -// \sum_i w_i (x_i - mean)^2 / (sum_i w_i) +// +// \sum_i w_i (x_i - mean)^2 / (sum_i w_i) +// // If weights is nil then all of the weights are 1. If weights is not nil, then // len(x) must equal len(weights). func PopVariance(x, weights []float64) float64 { diff --git a/stat/statmat.go b/stat/statmat.go index 06fcd93d..4f05f306 100644 --- a/stat/statmat.go +++ b/stat/statmat.go @@ -123,7 +123,9 @@ func corrToCov(c *mat.SymDense, sigma []float64) { } // Mahalanobis computes the Mahalanobis distance -// D = sqrt((x-y)ᵀ * Σ^-1 * (x-y)) +// +// D = sqrt((x-y)ᵀ * Σ^-1 * (x-y)) +// // between the column vectors x and y given the Cholesky decomposition of Σ. // Mahalanobis returns NaN if the linear solve fails. // diff --git a/unit/doc.go b/unit/doc.go index 631531fe..08f433b4 100644 --- a/unit/doc.go +++ b/unit/doc.go @@ -11,7 +11,7 @@ // base SI units and common derived units; and a system for dynamically // extensible user-defined units. // -// Static SI units +// # Static SI units // // This package provides a number of types representing either an SI base // unit or a common combination of base units, named for the physical quantity // float64.
The value of the float64 represents the quantity of that unit as // expressed in SI base units (kilogram, metre, Pascal, etc.). For example, // -// height := 1.6 * unit.Metre -// acc := unit.Acceleration(9.8) +// height := 1.6 * unit.Metre +// acc := unit.Acceleration(9.8) // // creates a variable named 'height' with a value of 1.6 metres, and // a variable named 'acc' with a value of 9.8 metres per second squared. // These types can be used to add compile-time safety to code. For // example, // -// func unitVolume(t unit.Temperature, p unit.Pressure) unit.Volume { -// ... -// } +// func unitVolume(t unit.Temperature, p unit.Pressure) unit.Volume { +// ... +// } // -// func main(){ -// t := 300 * unit.Kelvin -// p := 500 * unit.Kilo * unit.Pascal -// v := unitVolume(p, t) // compile-time error -// } +// func main(){ +// t := 300 * unit.Kelvin +// p := 500 * unit.Kilo * unit.Pascal +// v := unitVolume(p, t) // compile-time error +// } // // gives a compile-time error (temperature type does not match pressure type) // while the corresponding code using float64 runs without error. // -// func float64Volume(temperature, pressure float64) float64 { -// ... -// } +// func float64Volume(temperature, pressure float64) float64 { +// ... +// } // -// func main(){ -// t := 300.0 // Kelvin -// p := 500000.0 // Pascals -// v := float64Volume(p, t) // no error -// } +// func main(){ +// t := 300.0 // Kelvin +// p := 500000.0 // Pascals +// v := float64Volume(p, t) // no error +// } // // Many types have constants defined representing named SI units (Metre, // Kilogram, etc. ) or SI derived units (Pascal, Hz, etc.). The unit package // additionally provides untyped constants for SI prefixes, so the following // are all equivalent. // -// l := 0.001 * unit.Metre -// k := 1 * unit.Milli * unit.Metre -// j := unit.Length(0.001) +// l := 0.001 * unit.Metre +// k := 1 * unit.Milli * unit.Metre +// j := unit.Length(0.001) // // Additional SI-derived static units can also be defined by adding types that // satisfy the Uniter interface described below. // -// Dynamic user-extensible unit system +// # Dynamic user-extensible unit system // // The unit package also provides the Unit type, a representation of a general // dimensional value. Unit can be used to help prevent errors of dimensionality @@ -70,19 +70,19 @@ // variables of type Unit can be created with the New function and the // Dimensions map. For example, the code // -// rate := unit.New(1 * unit.Milli, Dimensions{MoleDim: 1, TimeDim: -1}) +// rate := unit.New(1 * unit.Milli, Dimensions{MoleDim: 1, TimeDim: -1}) // // creates a variable "rate" which has a value of 1e-3 mol/s. Methods of // unit can be used to modify this value, for example: // -// rate.Mul(1 * unit.Centi * unit.Metre).Div(1 * unit.Milli * unit.Volt) +// rate.Mul(1 * unit.Centi * unit.Metre).Div(1 * unit.Milli * unit.Volt) // // To convert the unit back into a typed float64 value, the From methods // of the dimensional types should be used. From will return an error if the // dimensions do not match. // -// var energy unit.Energy -// err := energy.From(acc) +// var energy unit.Energy +// err := energy.From(acc) // // Domain-specific problems may need custom dimensions, and for this purpose // NewDimension should be used to help avoid accidental overlap between @@ -92,13 +92,13 @@ // string which will be used for printing that dimension, and will return // a unique dimension number. 
// -// wbc := unit.NewDimension("WhiteBloodCell") +// wbc := unit.NewDimension("WhiteBloodCell") // // NewDimension should not be used, however, to create the unit of 'Slide', // because in this case slide is just a measurement of liquid volume. Instead, // a constant could be defined. // -// const Slide unit.Volume = 0.1 * unit.Micro * unit.Litre +// const Slide unit.Volume = 0.1 * unit.Micro * unit.Litre // // Note that unit cannot catch all errors related to dimensionality. // Different physical ideas are sometimes expressed with the same dimensions @@ -109,9 +109,9 @@ // represent units can help to catch errors at compile-time. For example, // using unit.Torque allows you to define a statically typed function like so // -// func LeverLength(apply unit.Force, want unit.Torque) unit.Length { +// func LeverLength(apply unit.Force, want unit.Torque) unit.Length { // return unit.Length(float64(want)/float64(apply)) -// } +// } // // This will prevent an energy value being provided to LeverLength in place // of a torque value. diff --git a/version.go b/version.go index f158fd01..95f2eee0 100644 --- a/version.go +++ b/version.go @@ -16,7 +16,9 @@ const root = "gonum.org/v1/gonum" // // If a replace directive exists in the Gonum go.mod, the replace will // be reported in the version in the following format: -// "version=>[replace-path] [replace-version]" +// +// "version=>[replace-path] [replace-version]" +// // and the replace sum will be returned in place of the original sum. // // The exact version format returned by Version may change in future.
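The LeverLength example from the unit documentation above can be exercised directly. A short sketch assuming the gonum.org/v1/gonum/unit import path:

	package main

	import (
		"fmt"

		"gonum.org/v1/gonum/unit"
	)

	// LeverLength is the statically typed helper from the documentation above.
	func LeverLength(apply unit.Force, want unit.Torque) unit.Length {
		return unit.Length(float64(want) / float64(apply))
	}

	func main() {
		apply := unit.Force(10) // 10 N
		want := unit.Torque(25) // 25 N m

		fmt.Println(float64(LeverLength(apply, want))) // 2.5 (metres)
	}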