all: update packages from mat64 to mat.
This mostly changes the package name in code, but it also fixes a couple of name clashes with the new package name.
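The change is mechanical: code that imported "gonum.org/v1/gonum/matrix/mat64" and referred to the package as mat64 now imports "gonum.org/v1/gonum/mat" and refers to it as mat, with the call signatures left unchanged. The name clashes are local variables named mat that would shadow the new package name; in the diff below they are renamed to matrix (for example in PageRank, NewDirectedMatrix and NewUndirectedMatrix). As a minimal sketch of the pattern, assuming nothing beyond the renamed import, the hypothetical program below shows the before/after usage; the file, the function newFilled and the numeric values are invented for illustration and are not part of the commit.

    package main

    import (
        "fmt"

        // Before this commit the import was "gonum.org/v1/gonum/matrix/mat64",
        // and the calls below would have read mat64.NewDense, mat64.Formatted,
        // and mat64.Prefix.
        "gonum.org/v1/gonum/mat"
    )

    // newFilled illustrates the shadowing fix: a local slice previously named
    // mat is renamed to matrix so that it no longer hides the mat package.
    func newFilled(n int, v float64) *mat.Dense {
        matrix := make([]float64, n*n) // was: mat := make([]float64, n*n)
        for i := range matrix {
            matrix[i] = v
        }
        return mat.NewDense(n, n, matrix)
    }

    func main() {
        a := mat.NewDense(2, 2, []float64{1, 2, 3, 4}) // was: mat64.NewDense(...)
        b := newFilled(2, 0.5)
        // mat64.Formatted and mat64.Prefix become mat.Formatted and mat.Prefix.
        fmt.Printf("a = %v\n", mat.Formatted(a, mat.Prefix("    ")))
        fmt.Printf("b = %v\n", mat.Formatted(b, mat.Prefix("    ")))
    }

The full diff follows.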
@@ -9,7 +9,7 @@ import (
 "math"

 "gonum.org/v1/gonum/diff/fd"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 func ExampleDerivative() {
@@ -53,12 +53,12 @@ func ExampleJacobian() {
 dst[2] = 4*x[1]*x[1] - 2*x[2]
 dst[3] = x[2] * math.Sin(x[0])
 }
-jac := mat64.NewDense(4, 3, nil)
+jac := mat.NewDense(4, 3, nil)
 fd.Jacobian(jac, f, []float64{1, 2, 3}, &fd.JacobianSettings{
 Formula: fd.Central,
 Concurrent: true,
 })
-fmt.Printf("J ≈ %.6v\n", mat64.Formatted(jac, mat64.Prefix(" ")))
+fmt.Printf("J ≈ %.6v\n", mat.Formatted(jac, mat.Prefix(" ")))

 // Output:
 // J ≈ ⎡ 1 0 0⎤

@@ -9,7 +9,7 @@ import (
 "sync"

 "gonum.org/v1/gonum/floats"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 type JacobianSettings struct {
@@ -39,7 +39,7 @@ type JacobianSettings struct {
 //
 // dst must be non-nil, the number of its columns must equal the length of x, and
 // the derivative order of the formula must be 1, otherwise Jacobian will panic.
-func Jacobian(dst *mat64.Dense, f func(y, x []float64), x []float64, settings *JacobianSettings) {
+func Jacobian(dst *mat.Dense, f func(y, x []float64), x []float64, settings *JacobianSettings) {
 n := len(x)
 if n == 0 {
 panic("jacobian: x has zero length")
@@ -93,7 +93,7 @@ func Jacobian(dst *mat64.Dense, f func(y, x []float64), x []float64, settings *J
 }
 }

-func jacobianSerial(dst *mat64.Dense, f func([]float64, []float64), x, origin []float64, formula Formula, step float64) {
+func jacobianSerial(dst *mat.Dense, f func([]float64, []float64), x, origin []float64, formula Formula, step float64) {
 m, n := dst.Dims()
 xcopy := make([]float64, n)
 y := make([]float64, m)
@@ -122,7 +122,7 @@ func jacobianSerial(dst *mat64.Dense, f func([]float64, []float64), x, origin []
 dst.Scale(1/step, dst)
 }

-func jacobianConcurrent(dst *mat64.Dense, f func([]float64, []float64), x, origin []float64, formula Formula, step float64, nWorkers int) {
+func jacobianConcurrent(dst *mat.Dense, f func([]float64, []float64), x, origin []float64, formula Formula, step float64, nWorkers int) {
 m, n := dst.Dims()
 for i := 0; i < m; i++ {
 for j := 0; j < n; j++ {
@@ -138,7 +138,7 @@ func jacobianConcurrent(dst *mat64.Dense, f func([]float64, []float64), x, origi
 defer wg.Done()
 xcopy := make([]float64, n)
 y := make([]float64, m)
-yVec := mat64.NewVector(m, y)
+yVec := mat.NewVector(m, y)
 for job := range jobs {
 copy(xcopy, x)
 xcopy[job.j] += job.pt.Loc * step
@@ -182,7 +182,7 @@ func jacobianConcurrent(dst *mat64.Dense, f func([]float64, []float64), x, origi
 // all columns of dst. Iterate again over all Formula points
 // because we don't forbid repeated locations.

-originVec := mat64.NewVector(m, origin)
+originVec := mat.NewVector(m, origin)
 for _, pt := range formula.Stencil {
 if pt.Loc != 0 {
 continue

@@ -10,13 +10,13 @@ import (
 "testing"

 "gonum.org/v1/gonum/floats"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 func vecFunc13(y, x []float64) {
 y[0] = 5*x[0] + x[2]*math.Sin(x[1]) + 1
 }
-func vecFunc13Jac(jac *mat64.Dense, x []float64) {
+func vecFunc13Jac(jac *mat.Dense, x []float64) {
 jac.Set(0, 0, 5)
 jac.Set(0, 1, x[2]*math.Cos(x[1]))
 jac.Set(0, 2, math.Sin(x[1]))
@@ -26,7 +26,7 @@ func vecFunc22(y, x []float64) {
 y[0] = x[0]*x[0]*x[1] + 1
 y[1] = 5*x[0] + math.Sin(x[1]) + 1
 }
-func vecFunc22Jac(jac *mat64.Dense, x []float64) {
+func vecFunc22Jac(jac *mat.Dense, x []float64) {
 jac.Set(0, 0, 2*x[0]*x[1])
 jac.Set(0, 1, x[0]*x[0])
 jac.Set(1, 0, 5)
@@ -39,7 +39,7 @@ func vecFunc43(y, x []float64) {
 y[2] = 4*x[1]*x[1] - 2*x[2] + 1
 y[3] = x[2]*math.Sin(x[0]) + 1
 }
-func vecFunc43Jac(jac *mat64.Dense, x []float64) {
+func vecFunc43Jac(jac *mat.Dense, x []float64) {
 jac.Set(0, 0, 1)
 jac.Set(0, 1, 0)
 jac.Set(0, 2, 0)
@@ -61,7 +61,7 @@ func TestJacobian(t *testing.T) {
 for tc, test := range []struct {
 m, n int
 f func([]float64, []float64)
-jac func(*mat64.Dense, []float64)
+jac func(*mat.Dense, []float64)
 }{
 {
 m: 1,
@@ -88,15 +88,15 @@ func TestJacobian(t *testing.T) {
 xcopy := make([]float64, test.n)
 copy(xcopy, x)

-want := mat64.NewDense(test.m, test.n, nil)
+want := mat.NewDense(test.m, test.n, nil)
 test.jac(want, x)

-got := mat64.NewDense(test.m, test.n, nil)
+got := mat.NewDense(test.m, test.n, nil)
 fillNaNDense(got)
 Jacobian(got, test.f, x, nil)
-if !mat64.EqualApprox(want, got, tol) {
+if !mat.EqualApprox(want, got, tol) {
 t.Errorf("Case %d (default settings): unexpected Jacobian.\nwant: %v\ngot: %v",
-tc, mat64.Formatted(want, mat64.Prefix(" ")), mat64.Formatted(got, mat64.Prefix(" ")))
+tc, mat.Formatted(want, mat.Prefix(" ")), mat.Formatted(got, mat.Prefix(" ")))
 }
 if !floats.Equal(x, xcopy) {
 t.Errorf("Case %d (default settings): x modified", tc)
@@ -107,7 +107,7 @@ func TestJacobian(t *testing.T) {
 for tc, test := range []struct {
 m, n int
 f func([]float64, []float64)
-jac func(*mat64.Dense, []float64)
+jac func(*mat.Dense, []float64)
 tol float64
 formula Formula
 }{
@@ -188,17 +188,17 @@ func TestJacobian(t *testing.T) {
 xcopy := make([]float64, test.n)
 copy(xcopy, x)

-want := mat64.NewDense(test.m, test.n, nil)
+want := mat.NewDense(test.m, test.n, nil)
 test.jac(want, x)

-got := mat64.NewDense(test.m, test.n, nil)
+got := mat.NewDense(test.m, test.n, nil)
 fillNaNDense(got)
 Jacobian(got, test.f, x, &JacobianSettings{
 Formula: test.formula,
 })
-if !mat64.EqualApprox(want, got, test.tol) {
+if !mat.EqualApprox(want, got, test.tol) {
 t.Errorf("Case %d: unexpected Jacobian.\nwant: %v\ngot: %v",
-tc, mat64.Formatted(want, mat64.Prefix(" ")), mat64.Formatted(got, mat64.Prefix(" ")))
+tc, mat.Formatted(want, mat.Prefix(" ")), mat.Formatted(got, mat.Prefix(" ")))
 }
 if !floats.Equal(x, xcopy) {
 t.Errorf("Case %d: x modified", tc)
@@ -209,9 +209,9 @@ func TestJacobian(t *testing.T) {
 Formula: test.formula,
 Concurrent: true,
 })
-if !mat64.EqualApprox(want, got, test.tol) {
+if !mat.EqualApprox(want, got, test.tol) {
 t.Errorf("Case %d (concurrent): unexpected Jacobian.\nwant: %v\ngot: %v",
-tc, mat64.Formatted(want, mat64.Prefix(" ")), mat64.Formatted(got, mat64.Prefix(" ")))
+tc, mat.Formatted(want, mat.Prefix(" ")), mat.Formatted(got, mat.Prefix(" ")))
 }
 if !floats.Equal(x, xcopy) {
 t.Errorf("Case %d (concurrent): x modified", tc)
@@ -224,9 +224,9 @@ func TestJacobian(t *testing.T) {
 Formula: test.formula,
 OriginValue: origin,
 })
-if !mat64.EqualApprox(want, got, test.tol) {
+if !mat.EqualApprox(want, got, test.tol) {
 t.Errorf("Case %d (origin): unexpected Jacobian.\nwant: %v\ngot: %v",
-tc, mat64.Formatted(want, mat64.Prefix(" ")), mat64.Formatted(got, mat64.Prefix(" ")))
+tc, mat.Formatted(want, mat.Prefix(" ")), mat.Formatted(got, mat.Prefix(" ")))
 }
 if !floats.Equal(x, xcopy) {
 t.Errorf("Case %d (origin): x modified", tc)
@@ -238,9 +238,9 @@ func TestJacobian(t *testing.T) {
 OriginValue: origin,
 Concurrent: true,
 })
-if !mat64.EqualApprox(want, got, test.tol) {
+if !mat.EqualApprox(want, got, test.tol) {
 t.Errorf("Case %d (concurrent, origin): unexpected Jacobian.\nwant: %v\ngot: %v",
-tc, mat64.Formatted(want, mat64.Prefix(" ")), mat64.Formatted(got, mat64.Prefix(" ")))
+tc, mat.Formatted(want, mat.Prefix(" ")), mat.Formatted(got, mat.Prefix(" ")))
 }
 if !floats.Equal(x, xcopy) {
 t.Errorf("Case %d (concurrent, origin): x modified", tc)
@@ -258,7 +258,7 @@ func randomSlice(n int, bound float64) []float64 {
 }

 // fillNaNDense fills the matrix m with NaN values.
-func fillNaNDense(m *mat64.Dense) {
+func fillNaNDense(m *mat.Dense) {
 r, c := m.Dims()
 for i := 0; i < r; i++ {
 for j := 0; j < c; j++ {

@@ -10,7 +10,7 @@ import (

 "gonum.org/v1/gonum/floats"
 "gonum.org/v1/gonum/graph"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 // PageRank returns the PageRank weights for nodes of the directed graph g
@@ -31,7 +31,7 @@ func PageRank(g graph.Directed, damp, tol float64) map[int]float64 {
 indexOf[n.ID()] = i
 }

-m := mat64.NewDense(len(nodes), len(nodes), nil)
+m := mat.NewDense(len(nodes), len(nodes), nil)
 dangling := damp / float64(len(nodes))
 for j, u := range nodes {
 to := g.From(u)
@@ -45,17 +45,17 @@ func PageRank(g graph.Directed, damp, tol float64) map[int]float64 {
 }
 }
 }
-mat := m.RawMatrix().Data
+matrix := m.RawMatrix().Data
 dt := (1 - damp) / float64(len(nodes))
-for i := range mat {
-mat[i] += dt
+for i := range matrix {
+matrix[i] += dt
 }

 last := make([]float64, len(nodes))
 for i := range last {
 last[i] = 1
 }
-lastV := mat64.NewVector(len(nodes), last)
+lastV := mat.NewVector(len(nodes), last)

 vec := make([]float64, len(nodes))
 var sum float64
@@ -68,7 +68,7 @@ func PageRank(g graph.Directed, damp, tol float64) map[int]float64 {
 for i := range vec {
 vec[i] *= f
 }
-v := mat64.NewVector(len(nodes), vec)
+v := mat.NewVector(len(nodes), vec)

 for {
 lastV, v = v, lastV
@@ -122,7 +122,7 @@ func PageRankSparse(g graph.Directed, damp, tol float64) map[int]float64 {
 for i := range last {
 last[i] = 1
 }
-lastV := mat64.NewVector(len(nodes), last)
+lastV := mat.NewVector(len(nodes), last)

 vec := make([]float64, len(nodes))
 var sum float64
@@ -135,7 +135,7 @@ func PageRankSparse(g graph.Directed, damp, tol float64) map[int]float64 {
 for i := range vec {
 vec[i] *= f
 }
-v := mat64.NewVector(len(nodes), vec)
+v := mat.NewVector(len(nodes), vec)

 dt := (1 - damp) / float64(len(nodes))
 for {
@@ -171,7 +171,7 @@ func (m rowCompressedMatrix) addTo(i, j int, v float64) { m[i].addTo(j, v) }
 // mulVecUnitary multiplies the receiver by the src vector, storing
 // the result in dst. It assumes src and dst are the same length as m
 // and that both have unitary vector increments.
-func (m rowCompressedMatrix) mulVecUnitary(dst, src *mat64.Vector) {
+func (m rowCompressedMatrix) mulVecUnitary(dst, src *mat.Vector) {
 dMat := dst.RawVector().Data
 for i, r := range m {
 dMat[i] = r.dotUnitary(src)
@@ -190,7 +190,7 @@ func (r *compressedRow) addTo(j int, v float64) {

 // dotUnitary performs a simplified scatter-based Ddot operations on
 // v and the receiver. v must have have a unitary vector increment.
-func (r compressedRow) dotUnitary(v *mat64.Vector) float64 {
+func (r compressedRow) dotUnitary(v *mat.Vector) float64 {
 var sum float64
 vec := v.RawVector().Data
 for _, e := range r {
@@ -208,7 +208,7 @@ type sparseElement struct {
 // onesDotUnitary performs the equivalent of a Ddot of v with
 // a ones vector of equal length. v must have have a unitary
 // vector increment.
-func onesDotUnitary(alpha float64, v *mat64.Vector) float64 {
+func onesDotUnitary(alpha float64, v *mat.Vector) float64 {
 var sum float64
 for _, f := range v.RawVector().Data {
 sum += alpha * f

@@ -9,7 +9,7 @@ import (
 "math/rand"

 "gonum.org/v1/gonum/graph"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 // Shortest is a shortest-path tree created by the BellmanFordFrom or DijkstraFrom
@@ -125,7 +125,7 @@ type AllShortest struct {
 //
 // dist contains the pairwise
 // distances between nodes.
-dist *mat64.Dense
+dist *mat.Dense
 // next contains the shortest-path
 // tree of the graph. The first index
 // is a linear mapping of from-dense-id
@@ -159,7 +159,7 @@ func newAllShortest(nodes []graph.Node, forward bool) AllShortest {
 nodes: nodes,
 indexOf: indexOf,

-dist: mat64.NewDense(len(nodes), len(nodes), dist),
+dist: mat.NewDense(len(nodes), len(nodes), dist),
 next: make([][]int, len(nodes)*len(nodes)),
 forward: forward,
 }

@@ -9,7 +9,7 @@ import (

 "gonum.org/v1/gonum/graph"
 "gonum.org/v1/gonum/graph/internal/ordered"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 // DirectedMatrix represents a directed graph using an adjacency
@@ -17,7 +17,7 @@ import (
 // Edges are stored implicitly as an edge weight, so edges stored in
 // the graph are not recoverable.
 type DirectedMatrix struct {
-mat *mat64.Dense
+mat *mat.Dense
 nodes []graph.Node

 self float64
@@ -29,17 +29,17 @@ type DirectedMatrix struct {
 // specifies the cost of self connection, and absent specifies the weight
 // returned for absent edges.
 func NewDirectedMatrix(n int, init, self, absent float64) *DirectedMatrix {
-mat := make([]float64, n*n)
+matrix := make([]float64, n*n)
 if init != 0 {
-for i := range mat {
-mat[i] = init
+for i := range matrix {
+matrix[i] = init
 }
 }
-for i := 0; i < len(mat); i += n + 1 {
-mat[i] = self
+for i := 0; i < len(matrix); i += n + 1 {
+matrix[i] = self
 }
 return &DirectedMatrix{
-mat: mat64.NewDense(n, n, mat),
+mat: mat.NewDense(n, n, matrix),
 self: self,
 absent: absent,
 }
@@ -255,10 +255,10 @@ func (g *DirectedMatrix) Degree(n graph.Node) int {
 return deg
 }

-// Matrix returns the mat64.Matrix representation of the graph. The orientation
+// Matrix returns the mat.Matrix representation of the graph. The orientation
 // of the matrix is such that the matrix entry at G_{ij} is the weight of the edge
 // from node i to node j.
-func (g *DirectedMatrix) Matrix() mat64.Matrix {
+func (g *DirectedMatrix) Matrix() mat.Matrix {
 // Prevent alteration of dimensions of the returned matrix.
 m := *g.mat
 return &m

@@ -9,7 +9,7 @@ import (

 "gonum.org/v1/gonum/graph"
 "gonum.org/v1/gonum/graph/internal/ordered"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 // UndirectedMatrix represents an undirected graph using an adjacency
@@ -17,7 +17,7 @@ import (
 // Edges are stored implicitly as an edge weight, so edges stored in
 // the graph are not recoverable.
 type UndirectedMatrix struct {
-mat *mat64.SymDense
+mat *mat.SymDense
 nodes []graph.Node

 self float64
@@ -29,17 +29,17 @@ type UndirectedMatrix struct {
 // specifies the cost of self connection, and absent specifies the weight
 // returned for absent edges.
 func NewUndirectedMatrix(n int, init, self, absent float64) *UndirectedMatrix {
-mat := make([]float64, n*n)
+matrix := make([]float64, n*n)
 if init != 0 {
-for i := range mat {
-mat[i] = init
+for i := range matrix {
+matrix[i] = init
 }
 }
-for i := 0; i < len(mat); i += n + 1 {
-mat[i] = self
+for i := 0; i < len(matrix); i += n + 1 {
+matrix[i] = self
 }
 return &UndirectedMatrix{
-mat: mat64.NewSymDense(n, mat),
+mat: mat.NewSymDense(n, matrix),
 self: self,
 absent: absent,
 }
@@ -216,8 +216,8 @@ func (g *UndirectedMatrix) Degree(n graph.Node) int {
 return deg
 }

-// Matrix returns the mat64.Matrix representation of the graph.
-func (g *UndirectedMatrix) Matrix() mat64.Matrix {
+// Matrix returns the mat.Matrix representation of the graph.
+func (g *UndirectedMatrix) Matrix() mat.Matrix {
 // Prevent alteration of dimensions of the returned matrix.
 m := *g.mat
 return &m

@@ -10,7 +10,7 @@ import (

 "gonum.org/v1/gonum/graph"
 "gonum.org/v1/gonum/graph/simple"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 var directedGraphs = []struct {
@@ -19,7 +19,7 @@ var directedGraphs = []struct {
 absent float64
 merge func(x, y float64, xe, ye graph.Edge) float64

-want mat64.Matrix
+want mat.Matrix
 }{
 {
 g: func() graph.DirectedBuilder { return simple.NewDirectedGraph(0, 0) },
@@ -28,7 +28,7 @@ var directedGraphs = []struct {
 {F: simple.Node(1), T: simple.Node(0), W: 1},
 {F: simple.Node(1), T: simple.Node(2), W: 1},
 },
-want: mat64.NewSymDense(3, []float64{
+want: mat.NewSymDense(3, []float64{
 0, (1. + 2.) / 2., 0,
 (1. + 2.) / 2., 0, 1. / 2.,
 0, 1. / 2., 0,
@@ -43,7 +43,7 @@ var directedGraphs = []struct {
 },
 absent: 1,
 merge: func(x, y float64, _, _ graph.Edge) float64 { return math.Sqrt(x * y) },
-want: mat64.NewSymDense(3, []float64{
+want: mat.NewSymDense(3, []float64{
 0, math.Sqrt(1 * 2), 0,
 math.Sqrt(1 * 2), 0, math.Sqrt(1 * 1),
 0, math.Sqrt(1 * 1), 0,
@@ -57,7 +57,7 @@ var directedGraphs = []struct {
 {F: simple.Node(1), T: simple.Node(2), W: 1},
 },
 merge: func(x, y float64, _, _ graph.Edge) float64 { return math.Min(x, y) },
-want: mat64.NewSymDense(3, []float64{
+want: mat.NewSymDense(3, []float64{
 0, math.Min(1, 2), 0,
 math.Min(1, 2), 0, math.Min(1, 0),
 0, math.Min(1, 0), 0,
@@ -79,7 +79,7 @@ var directedGraphs = []struct {
 }
 return math.Min(x, y)
 },
-want: mat64.NewSymDense(3, []float64{
+want: mat.NewSymDense(3, []float64{
 0, math.Min(1, 2), 0,
 math.Min(1, 2), 0, 1,
 0, 1, 0,
@@ -93,7 +93,7 @@ var directedGraphs = []struct {
 {F: simple.Node(1), T: simple.Node(2), W: 1},
 },
 merge: func(x, y float64, _, _ graph.Edge) float64 { return math.Max(x, y) },
-want: mat64.NewSymDense(3, []float64{
+want: mat.NewSymDense(3, []float64{
 0, math.Max(1, 2), 0,
 math.Max(1, 2), 0, math.Max(1, 0),
 0, math.Max(1, 0), 0,
@@ -116,10 +116,10 @@ func TestUndirect(t *testing.T) {
 }
 }

-if !mat64.Equal(dst.Matrix(), test.want) {
+if !mat.Equal(dst.Matrix(), test.want) {
 t.Errorf("unexpected result:\ngot:\n%.4v\nwant:\n%.4v",
-mat64.Formatted(dst.Matrix()),
-mat64.Formatted(test.want),
+mat.Formatted(dst.Matrix()),
+mat.Formatted(test.want),
 )
 }
 }

@@ -7,7 +7,7 @@ package optimize
 import (
 "math"

-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 // BFGS implements the Broyden–Fletcher–Goldfarb–Shanno optimization method. It
@@ -24,13 +24,13 @@ type BFGS struct {
 ls *LinesearchMethod

 dim int
-x mat64.Vector // Location of the last major iteration.
-grad mat64.Vector // Gradient at the last major iteration.
-s mat64.Vector // Difference between locations in this and the previous iteration.
-y mat64.Vector // Difference between gradients in this and the previous iteration.
-tmp mat64.Vector
+x mat.Vector // Location of the last major iteration.
+grad mat.Vector // Gradient at the last major iteration.
+s mat.Vector // Difference between locations in this and the previous iteration.
+y mat.Vector // Difference between gradients in this and the previous iteration.
+tmp mat.Vector

-invHess *mat64.SymDense
+invHess *mat.SymDense

 first bool // Indicator of the first iteration.
 }
@@ -57,8 +57,8 @@ func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
 b.dim = dim
 b.first = true

-x := mat64.NewVector(dim, loc.X)
-grad := mat64.NewVector(dim, loc.Gradient)
+x := mat.NewVector(dim, loc.X)
+grad := mat.NewVector(dim, loc.Gradient)
 b.x.CloneVec(x)
 b.grad.CloneVec(grad)

@@ -67,18 +67,18 @@ func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
 b.tmp.Reset()

 if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
-b.invHess = mat64.NewSymDense(dim, nil)
+b.invHess = mat.NewSymDense(dim, nil)
 } else {
-b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
+b.invHess = mat.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
 }
 // The values of the inverse Hessian are initialized in the first call to
 // NextDirection.

 // Initial direction is just negative of the gradient because the Hessian
 // is an identity matrix.
-d := mat64.NewVector(dim, dir)
+d := mat.NewVector(dim, dir)
 d.ScaleVec(-1, grad)
-return 1 / mat64.Norm(d, 2)
+return 1 / mat.Norm(d, 2)
 }

 func (b *BFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
@@ -93,21 +93,21 @@ func (b *BFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
 panic("bfgs: unexpected size mismatch")
 }

-x := mat64.NewVector(dim, loc.X)
-grad := mat64.NewVector(dim, loc.Gradient)
+x := mat.NewVector(dim, loc.X)
+grad := mat.NewVector(dim, loc.Gradient)

 // s = x_{k+1} - x_{k}
 b.s.SubVec(x, &b.x)
 // y = g_{k+1} - g_{k}
 b.y.SubVec(grad, &b.grad)

-sDotY := mat64.Dot(&b.s, &b.y)
+sDotY := mat.Dot(&b.s, &b.y)

 if b.first {
 // Rescale the initial Hessian.
 // From: Nocedal, J., Wright, S.: Numerical Optimization (2nd ed).
 // Springer (2006), page 143, eq. 6.20.
-yDotY := mat64.Dot(&b.y, &b.y)
+yDotY := mat.Dot(&b.y, &b.y)
 scale := sDotY / yDotY
 for i := 0; i < dim; i++ {
 for j := i; j < dim; j++ {
@@ -130,7 +130,7 @@ func (b *BFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
 //
 // Note that y_k^T B_k^-1 y_k is a scalar, and that the third term is a
 // rank-two update where B_k^-1 y_k is one vector and s_k is the other.
-yBy := mat64.Inner(&b.y, b.invHess, &b.y)
+yBy := mat.Inner(&b.y, b.invHess, &b.y)
 b.tmp.MulVec(b.invHess, &b.y)
 scale := (1 + yBy/sDotY) / sDotY
 b.invHess.SymRankOne(b.invHess, scale, &b.s)
@@ -142,7 +142,7 @@ func (b *BFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
 b.grad.CopyVec(grad)

 // New direction is stored in dir.
-d := mat64.NewVector(dim, dir)
+d := mat.NewVector(dim, dir)
 d.MulVec(b.invHess, grad)
 d.ScaleVec(-1, d)


@@ -6,7 +6,7 @@ package lp

 import (
 "gonum.org/v1/gonum/floats"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 // TODO(btracey): Have some sort of preprocessing step for helping to fix A to make it
@@ -27,7 +27,7 @@ import (
 // s.t aNew * x = bNew
 // x >= 0
 // If there are no constraints of the given type, the inputs may be nil.
-func Convert(c []float64, g mat64.Matrix, h []float64, a mat64.Matrix, b []float64) (cNew []float64, aNew *mat64.Dense, bNew []float64) {
+func Convert(c []float64, g mat.Matrix, h []float64, a mat.Matrix, b []float64) (cNew []float64, aNew *mat.Dense, bNew []float64) {
 nVar := len(c)
 nIneq := len(h)

@@ -120,21 +120,21 @@ func Convert(c []float64, g mat64.Matrix, h []float64, a mat64.Matrix, b []float
 copy(bNew[nIneq:], b)

 // Construct aNew = [G, -G, I; A, -A, 0].
-aNew = mat64.NewDense(nNewEq, nNewVar, nil)
+aNew = mat.NewDense(nNewEq, nNewVar, nil)
 if nIneq != 0 {
-aView := (aNew.View(0, 0, nIneq, nVar)).(*mat64.Dense)
+aView := (aNew.View(0, 0, nIneq, nVar)).(*mat.Dense)
 aView.Copy(g)
-aView = (aNew.View(0, nVar, nIneq, nVar)).(*mat64.Dense)
+aView = (aNew.View(0, nVar, nIneq, nVar)).(*mat.Dense)
 aView.Scale(-1, g)
-aView = (aNew.View(0, 2*nVar, nIneq, nIneq)).(*mat64.Dense)
+aView = (aNew.View(0, 2*nVar, nIneq, nIneq)).(*mat.Dense)
 for i := 0; i < nIneq; i++ {
 aView.Set(i, i, 1)
 }
 }
 if nEq != 0 {
-aView := (aNew.View(nIneq, 0, nEq, nVar)).(*mat64.Dense)
+aView := (aNew.View(nIneq, 0, nEq, nVar)).(*mat.Dense)
 aView.Copy(a)
-aView = (aNew.View(nIneq, nVar, nEq, nVar)).(*mat64.Dense)
+aView = (aNew.View(nIneq, nVar, nEq, nVar)).(*mat.Dense)
 aView.Scale(-1, a)
 }
 return cNew, aNew, bNew

@@ -11,7 +11,7 @@ import (
 "math"

 "gonum.org/v1/gonum/floats"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 // TODO(btracey): Could have a solver structure with an abstract factorizer. With
@@ -85,12 +85,12 @@ const (
 // Strang, Gilbert. "Linear Algebra and Applications." Academic, New York (1976).
 // For a detailed video introduction, see lectures 11-13 of UC Math 352
 // https://www.youtube.com/watch?v=ESzYPFkY3og&index=11&list=PLh464gFUoJWOmBYla3zbZbc4nv2AXez6X.
-func Simplex(c []float64, A mat64.Matrix, b []float64, tol float64, initialBasic []int) (optF float64, optX []float64, err error) {
+func Simplex(c []float64, A mat.Matrix, b []float64, tol float64, initialBasic []int) (optF float64, optX []float64, err error) {
 ans, x, _, err := simplex(initialBasic, c, A, b, tol)
 return ans, x, err
 }

-func simplex(initialBasic []int, c []float64, A mat64.Matrix, b []float64, tol float64) (float64, []float64, []int, error) {
+func simplex(initialBasic []int, c []float64, A mat.Matrix, b []float64, tol float64) (float64, []float64, []int, error) {
 err := verifyInputs(initialBasic, c, A, b)
 if err != nil {
 if err == ErrUnbounded {
@@ -123,7 +123,7 @@ func simplex(initialBasic []int, c []float64, A mat64.Matrix, b []float64, tol f
 // solution.

 var basicIdxs []int // The indices of the non-zero x values.
-var ab *mat64.Dense // The subset of columns of A listed in basicIdxs.
+var ab *mat.Dense // The subset of columns of A listed in basicIdxs.
 var xb []float64 // The non-zero elements of x. xb = ab^-1 b

 if initialBasic != nil {
@@ -131,7 +131,7 @@ func simplex(initialBasic []int, c []float64, A mat64.Matrix, b []float64, tol f
 if len(initialBasic) != m {
 panic("lp: incorrect number of initial vectors")
 }
-ab = mat64.NewDense(m, len(initialBasic), nil)
+ab = mat.NewDense(m, len(initialBasic), nil)
 extractColumns(ab, A, initialBasic)
 xb = make([]float64, m)
 err = initializeFromBasic(xb, ab, b)
@@ -175,11 +175,11 @@ func simplex(initialBasic []int, c []float64, A mat64.Matrix, b []float64, tol f
 for i, idx := range nonBasicIdx {
 cn[i] = c[idx]
 }
-an := mat64.NewDense(m, len(nonBasicIdx), nil)
+an := mat.NewDense(m, len(nonBasicIdx), nil)
 extractColumns(an, A, nonBasicIdx)

-bVec := mat64.NewVector(len(b), b)
-cbVec := mat64.NewVector(len(cb), cb)
+bVec := mat.NewVector(len(b), b)
+cbVec := mat.NewVector(len(cb), cb)

 // Temporary data needed each iteration. (Described later)
 r := make([]float64, n-m)
@@ -214,13 +214,13 @@ func simplex(initialBasic []int, c []float64, A mat64.Matrix, b []float64, tol f
 // of the rule in step 4 to avoid cycling.
 for {
 // Compute reduced costs -- r = cn - an^T ab^-T cb
-var tmp mat64.Vector
+var tmp mat.Vector
 err = tmp.SolveVec(ab.T(), cbVec)
 if err != nil {
 break
 }
 data := make([]float64, n-m)
-tmp2 := mat64.NewVector(n-m, data)
+tmp2 := mat.NewVector(n-m, data)
 tmp2.MulVec(an.T(), &tmp)
 floats.SubTo(r, cn, data)

@@ -261,13 +261,13 @@ func simplex(initialBasic []int, c []float64, A mat64.Matrix, b []float64, tol f
 // Replace the constrained basicIdx with the newIdx.
 basicIdxs[replace], nonBasicIdx[minIdx] = nonBasicIdx[minIdx], basicIdxs[replace]
 cb[replace], cn[minIdx] = cn[minIdx], cb[replace]
-tmpCol1 := mat64.Col(nil, replace, ab)
-tmpCol2 := mat64.Col(nil, minIdx, an)
+tmpCol1 := mat.Col(nil, replace, ab)
+tmpCol2 := mat.Col(nil, minIdx, an)
 ab.SetCol(replace, tmpCol2)
 an.SetCol(minIdx, tmpCol1)

 // Compute the new xb.
-xbVec := mat64.NewVector(len(xb), xb)
+xbVec := mat.NewVector(len(xb), xb)
 err = xbVec.SolveVec(ab, bVec)
 if err != nil {
 break
@@ -285,15 +285,15 @@ func simplex(initialBasic []int, c []float64, A mat64.Matrix, b []float64, tol f

 // computeMove computes how far can be moved replacing each index. The results
 // are stored into move.
-func computeMove(move []float64, minIdx int, A mat64.Matrix, ab *mat64.Dense, xb []float64, nonBasicIdx []int) error {
+func computeMove(move []float64, minIdx int, A mat.Matrix, ab *mat.Dense, xb []float64, nonBasicIdx []int) error {
 // Find ae.
-col := mat64.Col(nil, nonBasicIdx[minIdx], A)
-aCol := mat64.NewVector(len(col), col)
+col := mat.Col(nil, nonBasicIdx[minIdx], A)
+aCol := mat.NewVector(len(col), col)

 // d = - Ab^-1 Ae
 nb, _ := ab.Dims()
 d := make([]float64, nb)
-dVec := mat64.NewVector(nb, d)
+dVec := mat.NewVector(nb, d)
 err := dVec.SolveVec(ab, aCol)
 if err != nil {
 return ErrLinSolve
@@ -326,7 +326,7 @@ func computeMove(move []float64, minIdx int, A mat64.Matrix, ab *mat64.Dense, xb
 // replaceBland uses the Bland rule to find the indices to swap if the minimum
 // move is 0. The indices to be swapped are replace and minIdx (following the
 // nomenclature in the main routine).
-func replaceBland(A mat64.Matrix, ab *mat64.Dense, xb []float64, basicIdxs, nonBasicIdx []int, r, move []float64) (replace, minIdx int, err error) {
+func replaceBland(A mat.Matrix, ab *mat.Dense, xb []float64, basicIdxs, nonBasicIdx []int, r, move []float64) (replace, minIdx int, err error) {
 m, _ := A.Dims()
 // Use the traditional bland rule, except don't replace a constraint which
 // causes the new ab to be singular.
@@ -353,10 +353,10 @@ func replaceBland(A mat64.Matrix, ab *mat64.Dense, xb []float64, basicIdxs, nonB
 }
 copy(biCopy, basicIdxs)
 biCopy[replace] = nonBasicIdx[minIdx]
-abTmp := mat64.NewDense(m, len(biCopy), nil)
+abTmp := mat.NewDense(m, len(biCopy), nil)
 extractColumns(abTmp, A, biCopy)
 // If the condition number is reasonable, use this index.
-if mat64.Cond(abTmp, 1) < 1e16 {
+if mat.Cond(abTmp, 1) < 1e16 {
 return replace, minIdx, nil
 }
 }
@@ -364,7 +364,7 @@ func replaceBland(A mat64.Matrix, ab *mat64.Dense, xb []float64, basicIdxs, nonB
 return -1, -1, ErrBland
 }

-func verifyInputs(initialBasic []int, c []float64, A mat64.Matrix, b []float64) error {
+func verifyInputs(initialBasic []int, c []float64, A mat.Matrix, b []float64) error {
 m, n := A.Dims()
 if len(c) != n {
 panic("lp: c vector incorrect length")
@@ -426,14 +426,14 @@ func verifyInputs(initialBasic []int, c []float64, A mat64.Matrix, b []float64)
 //
 // If the columns of A are not linearly independent or if the initial set is not
 // feasible, an error is returned.
-func initializeFromBasic(xb []float64, ab *mat64.Dense, b []float64) error {
+func initializeFromBasic(xb []float64, ab *mat.Dense, b []float64) error {
 m, _ := ab.Dims()
 if len(xb) != m {
 panic("simplex: bad xb length")
 }
-xbMat := mat64.NewVector(m, xb)
+xbMat := mat.NewVector(m, xb)

-err := xbMat.SolveVec(ab, mat64.NewVector(m, b))
+err := xbMat.SolveVec(ab, mat.NewVector(m, b))
 if err != nil {
 return errors.New("lp: subcolumns of A for supplied initial basic singular")
 }
@@ -453,7 +453,7 @@ func initializeFromBasic(xb []float64, ab *mat64.Dense, b []float64) error {
 }

 // extractColumns copies the columns specified by cols into the columns of dst.
-func extractColumns(dst *mat64.Dense, A mat64.Matrix, cols []int) {
+func extractColumns(dst *mat.Dense, A mat.Matrix, cols []int) {
 r, c := dst.Dims()
 ra, _ := A.Dims()
 if ra != r {
@@ -464,14 +464,14 @@ func extractColumns(dst *mat64.Dense, A mat64.Matrix, cols []int) {
 }
 col := make([]float64, r)
 for j, idx := range cols {
-mat64.Col(col, idx, A)
+mat.Col(col, idx, A)
 dst.SetCol(j, col)
 }
 }

 // findInitialBasic finds an initial basic solution, and returns the basic
 // indices, ab, and xb.
-func findInitialBasic(A mat64.Matrix, b []float64) ([]int, *mat64.Dense, []float64, error) {
+func findInitialBasic(A mat.Matrix, b []float64) ([]int, *mat.Dense, []float64, error) {
 m, n := A.Dims()
 basicIdxs := findLinearlyIndependent(A)
 if len(basicIdxs) != m {
@@ -480,7 +480,7 @@ func findInitialBasic(A mat64.Matrix, b []float64) ([]int, *mat64.Dense, []float

 // It may be that this linearly independent basis is also a feasible set. If
 // so, the Phase I problem can be avoided.
-ab := mat64.NewDense(m, len(basicIdxs), nil)
+ab := mat.NewDense(m, len(basicIdxs), nil)
 extractColumns(ab, A, basicIdxs)
 xb := make([]float64, m)
 err := initializeFromBasic(xb, ab, b)
@@ -519,7 +519,7 @@ func findInitialBasic(A mat64.Matrix, b []float64) ([]int, *mat64.Dense, []float
 if i == minIdx {
 continue
 }
-mat64.Col(col, v, A)
+mat.Col(col, v, A)
 floats.Sub(aX1, col)
 }

@@ -527,7 +527,7 @@ func findInitialBasic(A mat64.Matrix, b []float64) ([]int, *mat64.Dense, []float
 // aNew = [A, a_{n+1}]
 // bNew = b
 // cNew = 1 for x_{n+1}
-aNew := mat64.NewDense(m, n+1, nil)
+aNew := mat.NewDense(m, n+1, nil)
 aNew.Copy(A)
 aNew.SetCol(n, aX1)
 basicIdxs[minIdx] = n // swap minIdx with n in the basic set.
@@ -574,7 +574,7 @@ func findInitialBasic(A mat64.Matrix, b []float64) ([]int, *mat64.Dense, []float
 }
 newBasic[addedIdx] = i
 if set {
-mat64.Col(col, i, A)
+mat.Col(col, i, A)
 ab.SetCol(addedIdx, col)
 } else {
 extractColumns(ab, A, newBasic)
@@ -590,10 +590,10 @@ func findInitialBasic(A mat64.Matrix, b []float64) ([]int, *mat64.Dense, []float

 // findLinearlyIndependnt finds a set of linearly independent columns of A, and
 // returns the column indexes of the linearly independent columns.
-func findLinearlyIndependent(A mat64.Matrix) []int {
+func findLinearlyIndependent(A mat.Matrix) []int {
 m, n := A.Dims()
 idxs := make([]int, 0, m)
-columns := mat64.NewDense(m, m, nil)
+columns := mat.NewDense(m, m, nil)
 newCol := make([]float64, m)
 // Walk in reverse order because slack variables are typically the last columns
 // of A.
@@ -601,7 +601,7 @@ func findLinearlyIndependent(A mat64.Matrix) []int {
 if len(idxs) == m {
 break
 }
-mat64.Col(newCol, i, A)
+mat.Col(newCol, i, A)
 columns.SetCol(len(idxs), newCol)
 if len(idxs) == 0 {
 // A column is linearly independent from the null set.
@@ -609,7 +609,7 @@ func findLinearlyIndependent(A mat64.Matrix) []int {
 idxs = append(idxs, i)
 continue
 }
-if mat64.Cond(columns.View(0, 0, m, len(idxs)+1), 1) > 1e12 {
+if mat.Cond(columns.View(0, 0, m, len(idxs)+1), 1) > 1e12 {
 // Not linearly independent.
 continue
 }
File diff suppressed because one or more lines are too long
@@ -8,13 +8,13 @@ import (
 "fmt"
 "log"

-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 "gonum.org/v1/gonum/optimize/convex/lp"
 )

 func ExampleSimplex() {
 c := []float64{-1, -2, 0, 0}
-A := mat64.NewDense(2, 4, []float64{-1, 2, 1, 0, 3, 1, 0, 1})
+A := mat.NewDense(2, 4, []float64{-1, 2, 1, 0, 3, 1, 0, 1})
 b := []float64{4, 9}

 opt, x, err := lp.Simplex(c, A, b, 0, nil)

@@ -8,7 +8,7 @@ import (
 "math"

 "gonum.org/v1/gonum/floats"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 // Beale implements the Beale's function.
@@ -56,7 +56,7 @@ func (Beale) Grad(grad, x []float64) {
 grad[1] = 2 * x[0] * (f1 + 2*f2*x[1] + 3*f3*x[1]*x[1])
 }

-func (Beale) Hess(hess mat64.MutableSymmetric, x []float64) {
+func (Beale) Hess(hess mat.MutableSymmetric, x []float64) {
 if len(x) != 2 {
 panic("dimension of the problem must be 2")
 }
@@ -518,7 +518,7 @@ func (BrownBadlyScaled) Grad(grad, x []float64) {
 grad[1] = 2*f2 + 2*f3*x[0]
 }

-func (BrownBadlyScaled) Hess(hess mat64.MutableSymmetric, x []float64) {
+func (BrownBadlyScaled) Hess(hess mat.MutableSymmetric, x []float64) {
 if len(x) != 2 {
 panic("dimension of the problem must be 2")
 }
@@ -595,7 +595,7 @@ func (BrownAndDennis) Grad(grad, x []float64) {
 }
 }

-func (BrownAndDennis) Hess(hess mat64.MutableSymmetric, x []float64) {
+func (BrownAndDennis) Hess(hess mat.MutableSymmetric, x []float64) {
 if len(x) != 4 {
 panic("dimension of the problem must be 4")
 }
@@ -1228,7 +1228,7 @@ func (PowellBadlyScaled) Grad(grad, x []float64) {
 grad[1] = 2 * (1e4*f1*x[0] - f2*math.Exp(-x[1]))
 }

-func (PowellBadlyScaled) Hess(hess mat64.MutableSymmetric, x []float64) {
+func (PowellBadlyScaled) Hess(hess mat.MutableSymmetric, x []float64) {
 if len(x) != 2 {
 panic("dimension of the problem must be 2")
 }
@@ -1478,7 +1478,7 @@ func (Watson) Grad(grad, x []float64) {
 grad[1] += 2 * t
 }

-func (Watson) Hess(hess mat64.MutableSymmetric, x []float64) {
+func (Watson) Hess(hess mat.MutableSymmetric, x []float64) {
 dim := len(x)
 if dim != hess.Symmetric() {
 panic("incorrect size of the Hessian")
@@ -1598,7 +1598,7 @@ func (Wood) Grad(grad, x []float64) {
 grad[3] = 2 * (90*f3 + 10*f5 - 0.1*f6)
 }

-func (Wood) Hess(hess mat64.MutableSymmetric, x []float64) {
+func (Wood) Hess(hess mat.MutableSymmetric, x []float64) {
 if len(x) != 4 {
 panic("dimension of the problem must be 4")
 }
|
@@ -7,7 +7,7 @@ package optimize
 import (
 	"testing"

-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 	"gonum.org/v1/gonum/optimize/functions"
 	"gonum.org/v1/gonum/stat/distmv"
 )
@@ -18,7 +18,7 @@ func TestGuessAndCheck(t *testing.T) {
 		Func: functions.ExtendedRosenbrock{}.Func,
 	}
 	mu := make([]float64, dim)
-	sigma := mat64.NewSymDense(dim, nil)
+	sigma := mat.NewSymDense(dim, nil)
 	for i := 0; i < dim; i++ {
 		sigma.SetSym(i, i, 1)
 	}
@@ -10,7 +10,7 @@ import (
 	"time"

 	"gonum.org/v1/gonum/floats"
-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 )

 // newLocation allocates a new locatian structure of the appropriate size. It
@@ -26,7 +26,7 @@ func newLocation(dim int, method Needser) *Location {
 		loc.Gradient = make([]float64, dim)
 	}
 	if method.Needs().Hessian {
-		loc.Hessian = mat64.NewSymDense(dim, nil)
+		loc.Hessian = mat.NewSymDense(dim, nil)
 	}
 	return loc
 }
@@ -42,7 +42,7 @@ func copyLocation(dst, src *Location) {

 	if src.Hessian != nil {
 		if dst.Hessian == nil || dst.Hessian.Symmetric() != len(src.X) {
-			dst.Hessian = mat64.NewSymDense(len(src.X), nil)
+			dst.Hessian = mat.NewSymDense(len(src.X), nil)
 		}
 		dst.Hessian.CopySym(src.Hessian)
 	}
@@ -7,7 +7,7 @@ package optimize
 import (
 	"math"

-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 )

 const maxNewtonModifications = 20
@@ -48,8 +48,8 @@ type Newton struct {

 	ls *LinesearchMethod

-	hess *mat64.SymDense // Storage for a copy of the Hessian matrix.
-	chol mat64.Cholesky  // Storage for the Cholesky factorization.
+	hess *mat.SymDense // Storage for a copy of the Hessian matrix.
+	chol mat.Cholesky  // Storage for the Cholesky factorization.
 	tau  float64
 }

@@ -88,8 +88,8 @@ func (n *Newton) NextDirection(loc *Location, dir []float64) (stepSize float64)
 	// the Identity) from Nocedal, Wright (2006), 2nd edition.

 	dim := len(loc.X)
-	d := mat64.NewVector(dim, dir)
-	grad := mat64.NewVector(dim, loc.Gradient)
+	d := mat.NewVector(dim, dir)
+	grad := mat.NewVector(dim, loc.Gradient)
 	n.hess.CopySym(loc.Hessian)

 	// Find the smallest diagonal entry of the Hessian.
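A rough, self-contained sketch of the modified-Cholesky step that the NextDirection hunk above touches, written against the renamed mat package. The helper name, the iteration cap, and the shift schedule are illustrative only and are not the optimize package's actual code:

package optimize

import (
	"math"

	"gonum.org/v1/gonum/mat"
)

// factorizeShifted copies hess and repeatedly adds tau*I to its diagonal,
// growing tau until the Cholesky factorization succeeds. The successful
// factorization is left in chol and can then be used to solve for the
// Newton direction.
func factorizeShifted(chol *mat.Cholesky, hess *mat.SymDense) bool {
	dim := hess.Symmetric()
	h := mat.NewSymDense(dim, nil)
	h.CopySym(hess)
	tau := 0.0
	for k := 0; k < 20; k++ { // cap chosen to echo maxNewtonModifications above
		for i := 0; i < dim; i++ {
			h.SetSym(i, i, hess.At(i, i)+tau)
		}
		if chol.Factorize(h) {
			return true
		}
		tau = math.Max(2*tau, 1e-3)
	}
	return false
}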
@@ -10,7 +10,7 @@ import (
 	"math"
 	"time"

-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 )

 const defaultGradientAbsTol = 1e-6
@@ -83,7 +83,7 @@ type Location struct {
 	X        []float64
 	F        float64
 	Gradient []float64
-	Hessian  *mat64.SymDense
+	Hessian  *mat.SymDense
 }

 // Result represents the answer of an optimization run. It contains the optimum
@@ -131,7 +131,7 @@ type Problem struct {

 	// Hess evaluates the Hessian at x and stores the result in-place in hess.
 	// Hess must not modify x.
-	Hess func(hess mat64.MutableSymmetric, x []float64)
+	Hess func(hess mat.MutableSymmetric, x []float64)

 	// Status reports the status of the objective function being optimized and any
 	// error. This can be used to terminate early, for example when the function is
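For orientation, a callback compatible with the Problem.Hess field above could look like the following sketch; the quadratic objective is invented for illustration and is not part of the package:

package quadexample

import "gonum.org/v1/gonum/mat"

// hessQuad writes the constant Hessian of
// f(x) = x[0]*x[0] + 3*x[0]*x[1] + 2*x[1]*x[1] into hess.
func hessQuad(hess mat.MutableSymmetric, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	hess.SetSym(0, 0, 2)
	hess.SetSym(0, 1, 3)
	hess.SetSym(1, 1, 4)
}

Such a function would be assigned to the Hess field of an optimize.Problem alongside its Func and Grad.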
@@ -161,10 +161,10 @@ func (p Problem) satisfies(method Needser) error {
 //
 // If Recorder is nil, no information will be recorded.
 type Settings struct {
 	UseInitialData  bool            // Use supplied information about the conditions at the initial x.
 	InitialValue    float64         // Function value at the initial x.
 	InitialGradient []float64       // Gradient at the initial x.
-	InitialHessian  *mat64.SymDense // Hessian at the initial x.
+	InitialHessian  *mat.SymDense   // Hessian at the initial x.

 	// FunctionThreshold is the threshold for acceptably small values of the
 	// objective function. FunctionThreshold status is returned if
@@ -254,9 +254,9 @@ func resize(x []float64, dim int) []float64 {
 	return x[:dim]
 }

-func resizeSymDense(m *mat64.SymDense, dim int) *mat64.SymDense {
+func resizeSymDense(m *mat.SymDense, dim int) *mat.SymDense {
 	if m == nil || cap(m.RawSymmetric().Data) < dim*dim {
-		return mat64.NewSymDense(dim, nil)
+		return mat.NewSymDense(dim, nil)
 	}
-	return mat64.NewSymDense(dim, m.RawSymmetric().Data[:dim*dim])
+	return mat.NewSymDense(dim, m.RawSymmetric().Data[:dim*dim])
 }
@@ -10,7 +10,7 @@ import (
 	"testing"

 	"gonum.org/v1/gonum/floats"
-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 	"gonum.org/v1/gonum/optimize/functions"
 )

@@ -1237,7 +1237,7 @@ func testLocal(t *testing.T, tests []unconstrainedTest, method Method) {
 			test.p.Grad(settings.InitialGradient, test.x)
 		}
 		if method.Needs().Hessian {
-			settings.InitialHessian = mat64.NewSymDense(len(test.x), nil)
+			settings.InitialHessian = mat.NewSymDense(len(test.x), nil)
 			test.p.Hess(settings.InitialHessian, test.x)
 		}

@@ -4,7 +4,7 @@

 package stat_test

-import "gonum.org/v1/gonum/matrix/mat64"
+import "gonum.org/v1/gonum/mat"

 // Boston Housing Data of Harrison and Rubinfeld (1978)
 // http://dx.doi.org/10.1016/0095-0696(78)90006-2
@@ -21,7 +21,7 @@ import "gonum.org/v1/gonum/matrix/mat64"
 // proportion of owner-occupied units built prior to 1940,
 // full-value property-tax rate per $10000,
 // median value of owner-occupied homes in $1000s.
-var bostonData = mat64.NewDense(506, 11, []float64{
+var bostonData = mat.NewDense(506, 11, []float64{
 	0.00632, 2.31000, 0.53800, 4.09000, 1.00000, 15.30000, 396.90000, 6.57500, 65.20000, 296.00000, 24.00000,
 	0.02731, 7.07000, 0.46900, 4.96710, 2.00000, 17.80000, 396.90000, 6.42100, 78.90000, 242.00000, 21.60000,
 	0.02729, 7.07000, 0.46900, 4.96710, 2.00000, 17.80000, 392.83000, 7.18500, 61.10000, 242.00000, 34.70000,
@@ -4,13 +4,13 @@

 package stat_test

-import "gonum.org/v1/gonum/matrix/mat64"
+import "gonum.org/v1/gonum/mat"

 // ASA Car Exposition Data of Ramos and Donoho (1983)
 // http://lib.stat.cmu.edu/datasets/cars.desc
 // http://lib.stat.cmu.edu/datasets/cars.data
 // Columns are: displacement, horsepower, weight, acceleration, MPG.
-var carData = mat64.NewDense(392, 5, []float64{
+var carData = mat.NewDense(392, 5, []float64{
 	307.0, 130.0, 3504.0, 12.0, 18.0,
 	350.0, 165.0, 3693.0, 11.5, 15.0,
 	318.0, 150.0, 3436.0, 11.0, 18.0,
@@ -9,13 +9,13 @@ import (
 	"log"

 	"gonum.org/v1/gonum/floats"
-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 	"gonum.org/v1/gonum/stat"
 )

 // symView is a helper for getting a View of a SymDense.
 type symView struct {
-	sym *mat64.SymDense
+	sym *mat.SymDense

 	i, j, r, c int
 }
@@ -32,7 +32,7 @@ func (s symView) At(i, j int) float64 {
 	return s.sym.At(s.i+i, s.j+j)
 }

-func (s symView) T() mat64.Matrix { return mat64.Transpose{s} }
+func (s symView) T() mat.Matrix { return mat.Transpose{s} }

 func ExampleCC() {
 	// This example is directly analogous to Example 3.5 on page 87 of
@@ -65,7 +65,7 @@ func ExampleCC() {
 	ydata := bostonData.Slice(0, n, xd, xd+yd)

 	// For comparison, calculate the correlation matrix for the original data.
-	var cor mat64.SymDense
+	var cor mat.SymDense
 	stat.CorrelationMatrix(&cor, bostonData, nil)

 	// Extract just those correlations that are between xdata and ydata.
@@ -75,7 +75,7 @@ func ExampleCC() {
 	// between the 5th variable of xdata (index of accessibility to radial
 	// highways) and the 3rd variable of ydata (full-value property-tax rate per
 	// $10000).
-	fmt.Printf("corRaw = %.4f", mat64.Formatted(corRaw, mat64.Prefix(" ")))
+	fmt.Printf("corRaw = %.4f", mat.Formatted(corRaw, mat.Prefix(" ")))

 	// Calculate the canonical correlations.
 	var cc stat.CC
@@ -93,16 +93,16 @@ func ExampleCC() {

 	// Canonical Correlation Matrix, or the correlations between the sphered
 	// data.
-	var corSph mat64.Dense
+	var corSph mat.Dense
 	corSph.Clone(pVecs)
 	col := make([]float64, xd)
 	for j := 0; j < yd; j++ {
-		mat64.Col(col, j, &corSph)
+		mat.Col(col, j, &corSph)
 		floats.Scale(ccors[j], col)
 		corSph.SetCol(j, col)
 	}
 	corSph.Product(&corSph, qVecs.T())
-	fmt.Printf("\n\ncorSph = %.4f", mat64.Formatted(&corSph, mat64.Prefix(" ")))
+	fmt.Printf("\n\ncorSph = %.4f", mat.Formatted(&corSph, mat.Prefix(" ")))

 	// Canonical Correlations. Note that the first canonical correlation is
 	// 0.95, stronger than the greatest correlation in the original data, and
@@ -110,13 +110,13 @@ func ExampleCC() {
 	fmt.Printf("\n\nccors = %.4f", ccors)

 	// Left and right eigenvectors of the canonical correlation matrix.
-	fmt.Printf("\n\npVecs = %.4f", mat64.Formatted(pVecs, mat64.Prefix(" ")))
-	fmt.Printf("\n\nqVecs = %.4f", mat64.Formatted(qVecs, mat64.Prefix(" ")))
+	fmt.Printf("\n\npVecs = %.4f", mat.Formatted(pVecs, mat.Prefix(" ")))
+	fmt.Printf("\n\nqVecs = %.4f", mat.Formatted(qVecs, mat.Prefix(" ")))

 	// Canonical Correlation Transforms. These can be useful as they represent
 	// the canonical variables as linear combinations of the original variables.
-	fmt.Printf("\n\nphiVs = %.4f", mat64.Formatted(phiVs, mat64.Prefix(" ")))
-	fmt.Printf("\n\npsiVs = %.4f", mat64.Formatted(psiVs, mat64.Prefix(" ")))
+	fmt.Printf("\n\nphiVs = %.4f", mat.Formatted(phiVs, mat.Prefix(" ")))
+	fmt.Printf("\n\npsiVs = %.4f", mat.Formatted(psiVs, mat.Prefix(" ")))

 	// Output:
 	// corRaw = ⎡-0.2192 0.3527 0.5828 -0.3883⎤
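In standard CCA notation (background only; nothing here changes in this commit), the phiVs and psiVs matrices printed above are the transforms that turn the original variables into the canonical variables,

U = X\,\Phi, \qquad V = Y\,\Psi, \qquad \operatorname{corr}(U_{\cdot i}, V_{\cdot i}) = \rho_i,

where the \rho_i are the canonical correlations reported as ccors.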
@@ -8,26 +8,26 @@ import (
 	"testing"

 	"gonum.org/v1/gonum/floats"
-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 	"gonum.org/v1/gonum/stat"
 )

 func TestCanonicalCorrelations(t *testing.T) {
 tests:
 	for i, test := range []struct {
-		xdata     mat64.Matrix
-		ydata     mat64.Matrix
+		xdata     mat.Matrix
+		ydata     mat.Matrix
 		weights   []float64
 		wantCorrs []float64
-		wantpVecs *mat64.Dense
-		wantqVecs *mat64.Dense
-		wantphiVs *mat64.Dense
-		wantpsiVs *mat64.Dense
+		wantpVecs *mat.Dense
+		wantqVecs *mat.Dense
+		wantphiVs *mat.Dense
+		wantpsiVs *mat.Dense
 		epsilon   float64
 	}{
 		// Test results verified using R.
 		{ // Truncated iris data, Sepal vs Petal measurements.
-			xdata: mat64.NewDense(10, 2, []float64{
+			xdata: mat.NewDense(10, 2, []float64{
 				5.1, 3.5,
 				4.9, 3.0,
 				4.7, 3.2,
@@ -39,7 +39,7 @@ tests:
 				4.4, 2.9,
 				4.9, 3.1,
 			}),
-			ydata: mat64.NewDense(10, 2, []float64{
+			ydata: mat.NewDense(10, 2, []float64{
 				1.4, 0.2,
 				1.4, 0.2,
 				1.3, 0.2,
@@ -52,19 +52,19 @@ tests:
 				1.5, 0.1,
 			}),
 			wantCorrs: []float64{0.7250624174504773, 0.5547679185730191},
-			wantpVecs: mat64.NewDense(2, 2, []float64{
+			wantpVecs: mat.NewDense(2, 2, []float64{
 				0.0765914610875867, 0.9970625597666721,
 				0.9970625597666721, -0.0765914610875868,
 			}),
-			wantqVecs: mat64.NewDense(2, 2, []float64{
+			wantqVecs: mat.NewDense(2, 2, []float64{
 				0.3075184850910837, 0.9515421069649439,
 				0.9515421069649439, -0.3075184850910837,
 			}),
-			wantphiVs: mat64.NewDense(2, 2, []float64{
+			wantphiVs: mat.NewDense(2, 2, []float64{
 				-1.9794877596804641, 5.2016325219025124,
 				4.5211829944066553, -2.7263663170835697,
 			}),
-			wantpsiVs: mat64.NewDense(2, 2, []float64{
+			wantpsiVs: mat.NewDense(2, 2, []float64{
 				-0.0613084818030103, 10.8514169865438941,
 				12.7209032660734298, -7.6793888180353775,
 			}),
@@ -79,21 +79,21 @@ tests:
 			// Acceleration, MPG
 			ydata:     carData.Slice(0, 392, 3, 5),
 			wantCorrs: []float64{0.8782187384352336, 0.6328187219216761},
-			wantpVecs: mat64.NewDense(3, 2, []float64{
+			wantpVecs: mat.NewDense(3, 2, []float64{
 				0.3218296374829181, 0.3947540257657075,
 				0.4162807660635797, 0.7573719053303306,
 				0.8503740401982725, -0.5201509936144236,
 			}),
-			wantqVecs: mat64.NewDense(2, 2, []float64{
+			wantqVecs: mat.NewDense(2, 2, []float64{
 				-0.5161984172278830, -0.8564690269072364,
 				-0.8564690269072364, 0.5161984172278830,
 			}),
-			wantphiVs: mat64.NewDense(3, 2, []float64{
+			wantphiVs: mat.NewDense(3, 2, []float64{
 				0.0025033152994308, 0.0047795464118615,
 				0.0201923608080173, 0.0409150208725958,
 				-0.0000247374128745, -0.0026766435161875,
 			}),
-			wantpsiVs: mat64.NewDense(2, 2, []float64{
+			wantpsiVs: mat.NewDense(2, 2, []float64{
 				-0.1666196759760772, -0.3637393866139658,
 				-0.0915512109649727, 0.1077863777929168,
 			}),
@@ -116,7 +116,7 @@ tests:
 			// Median value of owner-occupied homes in $1000s
 			ydata:     bostonData.Slice(0, 506, 7, 11),
 			wantCorrs: []float64{0.9451239443886021, 0.6786622733370654, 0.5714338361583764, 0.2009739704710440},
-			wantpVecs: mat64.NewDense(7, 4, []float64{
+			wantpVecs: mat.NewDense(7, 4, []float64{
 				-0.2574391924541903, 0.0158477516621194, 0.2122169934631024, -0.0945733803894706,
 				-0.4836594430018478, 0.3837101908138468, 0.1474448317415911, 0.6597324886718275,
 				-0.0800776365873296, 0.3493556742809252, 0.3287336458109373, -0.2862040444334655,
@@ -125,13 +125,13 @@ tests:
 				-0.0990903250057199, 0.0503411215453873, 0.6384330631742202, 0.1022367136218303,
 				0.4260459963765036, 0.0323334351308141, -0.2289527516030810, 0.6419232947608805,
 			}),
-			wantqVecs: mat64.NewDense(4, 4, []float64{
+			wantqVecs: mat.NewDense(4, 4, []float64{
 				0.0181660502363264, -0.1583489460479038, -0.0066723577642883, -0.9871935400650649,
 				-0.2347699045986119, 0.9483314614936594, -0.1462420505631345, -0.1554470767919033,
 				-0.9700704038477141, -0.2406071741000039, -0.0251838984227037, 0.0209134074358349,
 				0.0593000682318482, -0.1330460003097728, -0.9889057151969489, 0.0291161494720761,
 			}),
-			wantphiVs: mat64.NewDense(7, 4, []float64{
+			wantphiVs: mat.NewDense(7, 4, []float64{
 				-0.0027462234108197, 0.0093444513500898, 0.0489643932714296, -0.0154967189805819,
 				-0.0428564455279537, -0.0241708702119420, 0.0360723472093996, 0.1838983230588095,
 				-1.2248435648802380, 5.6030921364723980, 5.8094144583797025, -4.7926812190419676,
@@ -140,7 +140,7 @@ tests:
 				-0.0233270323101624, 0.1046330818178399, 0.3853045975077387, -0.0160927870102877,
 				0.0001293051387859, 0.0004540746921446, -0.0030296315865440, 0.0081895477974654,
 			}),
-			wantpsiVs: mat64.NewDense(4, 4, []float64{
+			wantpsiVs: mat.NewDense(4, 4, []float64{
 				0.0301593362017375, -0.3002219289647127, 0.0878217377593682, -1.9583226531517062,
 				-0.0065483104073892, 0.0392212086716247, -0.0117570776209991, -0.0061113064481860,
 				-0.0052075523350125, -0.0045770200452960, -0.0022762313289592, 0.0008441873006821,
@@ -151,8 +151,8 @@ tests:
 	} {
 		var cc stat.CC
 		var corrs []float64
-		var pVecs, qVecs *mat64.Dense
-		var phiVs, psiVs *mat64.Dense
+		var pVecs, qVecs *mat.Dense
+		var phiVs, psiVs *mat.Dense
 		for j := 0; j < 2; j++ {
 			err := cc.CanonicalCorrelations(test.xdata, test.ydata, test.weights)
 			if err != nil {
@@ -170,21 +170,21 @@ tests:
 				t.Errorf("%d use %d: unexpected variance result got:%v, want:%v",
 					i, j, corrs, test.wantCorrs)
 			}
-			if !mat64.EqualApprox(pVecs, test.wantpVecs, test.epsilon) {
+			if !mat.EqualApprox(pVecs, test.wantpVecs, test.epsilon) {
 				t.Errorf("%d use %d: unexpected CCA result got:\n%v\nwant:\n%v",
-					i, j, mat64.Formatted(pVecs), mat64.Formatted(test.wantpVecs))
+					i, j, mat.Formatted(pVecs), mat.Formatted(test.wantpVecs))
 			}
-			if !mat64.EqualApprox(qVecs, test.wantqVecs, test.epsilon) {
+			if !mat.EqualApprox(qVecs, test.wantqVecs, test.epsilon) {
 				t.Errorf("%d use %d: unexpected CCA result got:\n%v\nwant:\n%v",
-					i, j, mat64.Formatted(qVecs), mat64.Formatted(test.wantqVecs))
+					i, j, mat.Formatted(qVecs), mat.Formatted(test.wantqVecs))
 			}
-			if !mat64.EqualApprox(phiVs, test.wantphiVs, test.epsilon) {
+			if !mat.EqualApprox(phiVs, test.wantphiVs, test.epsilon) {
 				t.Errorf("%d use %d: unexpected CCA result got:\n%v\nwant:\n%v",
-					i, j, mat64.Formatted(phiVs), mat64.Formatted(test.wantphiVs))
+					i, j, mat.Formatted(phiVs), mat.Formatted(test.wantphiVs))
 			}
-			if !mat64.EqualApprox(psiVs, test.wantpsiVs, test.epsilon) {
+			if !mat.EqualApprox(psiVs, test.wantpsiVs, test.epsilon) {
 				t.Errorf("%d use %d: unexpected CCA result got:\n%v\nwant:\n%v",
-					i, j, mat64.Formatted(psiVs), mat64.Formatted(test.wantpsiVs))
+					i, j, mat.Formatted(psiVs), mat.Formatted(test.wantpsiVs))
 			}
 		}
 	}
 }
@@ -9,9 +9,8 @@ import (
 	"math/rand"
 	"sync"

+	"gonum.org/v1/gonum/mat"
 	"gonum.org/v1/gonum/mathext"
-	"gonum.org/v1/gonum/matrix"
-	"gonum.org/v1/gonum/matrix/mat64"
 	"gonum.org/v1/gonum/stat/distuv"
 )

@@ -30,12 +29,12 @@ type Wishart struct {
 	src *rand.Rand

 	dim     int
-	cholv   mat64.Cholesky
+	cholv   mat.Cholesky
 	logdetv float64
-	upper   mat64.TriDense
+	upper   mat.TriDense

 	once sync.Once
-	v    *mat64.SymDense // only stored if needed
+	v    *mat.SymDense // only stored if needed
 }

 // NewWishart returns a new Wishart distribution with the given shape matrix and
@@ -43,18 +42,18 @@ type Wishart struct {
 // successful.
 //
 // NewWishart panics if nu <= d - 1 where d is the order of v.
-func NewWishart(v mat64.Symmetric, nu float64, src *rand.Rand) (*Wishart, bool) {
+func NewWishart(v mat.Symmetric, nu float64, src *rand.Rand) (*Wishart, bool) {
 	dim := v.Symmetric()
 	if nu <= float64(dim-1) {
 		panic("wishart: nu must be greater than dim-1")
 	}
-	var chol mat64.Cholesky
+	var chol mat.Cholesky
 	ok := chol.Factorize(v)
 	if !ok {
 		return nil, false
 	}

-	var u mat64.TriDense
+	var u mat.TriDense
 	u.UFromCholesky(&chol)

 	w := &Wishart{
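A minimal usage sketch of the constructor after the rename. The shape matrix and degrees of freedom are arbitrary example values, and passing a nil source is assumed to fall back to a default generator:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
	"gonum.org/v1/gonum/stat/distmv"
)

func main() {
	v := mat.NewSymDense(2, []float64{1, 0, 0, 1}) // example shape matrix
	w, ok := distmv.NewWishart(v, 4, nil)
	if !ok {
		panic("shape matrix is not positive definite")
	}
	x := w.RandSym(nil) // draw one random symmetric matrix
	fmt.Printf("sample =\n%.4v\n", mat.Formatted(x))
}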
@@ -73,9 +72,9 @@ func NewWishart(v mat64.Symmetric, nu float64, src *rand.Rand) (*Wishart, bool)
 // If x is nil, a new matrix is allocated and returned. If x is not nil, the
 // result is stored in-place into x and MeanSym will panic if the order of x
 // is not equal to the order of the receiver.
-func (w *Wishart) MeanSym(x *mat64.SymDense) *mat64.SymDense {
+func (w *Wishart) MeanSym(x *mat.SymDense) *mat.SymDense {
 	if x == nil {
-		x = mat64.NewSymDense(w.dim, nil)
+		x = mat.NewSymDense(w.dim, nil)
 	}
 	d := x.Symmetric()
 	if d != w.dim {
@@ -89,7 +88,7 @@ func (w *Wishart) MeanSym(x *mat64.SymDense) *mat64.SymDense {

 // ProbSym returns the probability of the symmetric matrix x. If x is not positive
 // definite (the Cholesky decomposition fails), it has 0 probability.
-func (w *Wishart) ProbSym(x mat64.Symmetric) float64 {
+func (w *Wishart) ProbSym(x mat.Symmetric) float64 {
 	return math.Exp(w.LogProbSym(x))
 }

@@ -97,12 +96,12 @@ func (w *Wishart) ProbSym(x mat64.Symmetric) float64 {
 //
 // LogProbSym returns -∞ if the input matrix is not positive definite (the Cholesky
 // decomposition fails).
-func (w *Wishart) LogProbSym(x mat64.Symmetric) float64 {
+func (w *Wishart) LogProbSym(x mat.Symmetric) float64 {
 	dim := x.Symmetric()
 	if dim != w.dim {
 		panic(badDim)
 	}
-	var chol mat64.Cholesky
+	var chol mat.Cholesky
 	ok := chol.Factorize(x)
 	if !ok {
 		return math.Inf(-1)
@@ -112,7 +111,7 @@ func (w *Wishart) LogProbSym(x mat64.Symmetric) float64 {

 // LogProbSymChol returns the log of the probability of the input symmetric matrix
 // given its Cholesky decomposition.
-func (w *Wishart) LogProbSymChol(cholX *mat64.Cholesky) float64 {
+func (w *Wishart) LogProbSymChol(cholX *mat.Cholesky) float64 {
 	dim := cholX.Size()
 	if dim != w.dim {
 		panic(badDim)
@@ -120,7 +119,7 @@ func (w *Wishart) LogProbSymChol(cholX *mat64.Cholesky) float64 {
 	return w.logProbSymChol(cholX)
 }

-func (w *Wishart) logProbSymChol(cholX *mat64.Cholesky) float64 {
+func (w *Wishart) logProbSymChol(cholX *mat.Cholesky) float64 {
 	// The PDF is
 	//  p(X) = [|X|^((ν-d-1)/2) * exp(-tr(V^-1 * X)/2)] / [2^(ν*d/2) * |V|^(ν/2) * Γ_d(ν/2)]
 	// The LogPDF is thus
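Written out as a log-density, the comment's formula is

\log p(X) = \frac{\nu-d-1}{2}\log\lvert X\rvert - \frac{1}{2}\operatorname{tr}\!\bigl(V^{-1}X\bigr) - \frac{\nu d}{2}\log 2 - \frac{\nu}{2}\log\lvert V\rvert - \log \Gamma_d\!\Bigl(\frac{\nu}{2}\Bigr),

which is what the logdetx, tr, fnu, and fdim terms in the next hunk assemble.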
@@ -128,16 +127,16 @@ func (w *Wishart) logProbSymChol(cholX *mat64.Cholesky) float64 {
 	logdetx := cholX.LogDet()

 	// Compute tr(V^-1 * X), using the fact that X = U^T * U.
-	var u mat64.TriDense
+	var u mat.TriDense
 	u.UFromCholesky(cholX)

-	var vinvx mat64.Dense
+	var vinvx mat.Dense
 	err := vinvx.SolveCholesky(&w.cholv, u.T())
 	if err != nil {
 		return math.Inf(-1)
 	}
 	vinvx.Mul(&vinvx, &u)
-	tr := mat64.Trace(&vinvx)
+	tr := mat.Trace(&vinvx)

 	fnu := float64(w.nu)
 	fdim := float64(w.dim)
@@ -146,18 +145,18 @@ func (w *Wishart) logProbSymChol(cholX *mat64.Cholesky) float64 {
 }

 // RandSym generates a random symmetric matrix from the distribution.
-func (w *Wishart) RandSym(x *mat64.SymDense) *mat64.SymDense {
+func (w *Wishart) RandSym(x *mat.SymDense) *mat.SymDense {
 	if x == nil {
-		x = &mat64.SymDense{}
+		x = &mat.SymDense{}
 	}
-	var c mat64.Cholesky
+	var c mat.Cholesky
 	w.RandChol(&c)
 	x.FromCholesky(&c)
 	return x
 }

 // RandChol generates the Cholesky decomposition of a random matrix from the distribution.
-func (w *Wishart) RandChol(c *mat64.Cholesky) *mat64.Cholesky {
+func (w *Wishart) RandChol(c *mat.Cholesky) *mat.Cholesky {
 	// TODO(btracey): Modify the code if the underlying data from c is exposed
 	// to avoid the dim^2 allocation here.

@@ -179,7 +178,7 @@ func (w *Wishart) RandChol(c *mat64.Cholesky) *mat64.Cholesky {
 		Source: w.src,
 	}

-	t := mat64.NewTriDense(w.dim, matrix.Upper, nil)
+	t := mat.NewTriDense(w.dim, mat.Upper, nil)
 	for i := 0; i < w.dim; i++ {
 		v := distuv.ChiSquared{
 			K: w.nu - float64(i),
@@ -195,7 +194,7 @@ func (w *Wishart) RandChol(c *mat64.Cholesky) *mat64.Cholesky {

 	t.MulTri(t, &w.upper)
 	if c == nil {
-		c = &mat64.Cholesky{}
+		c = &mat.Cholesky{}
 	}
 	c.SetFromU(t)
 	return c
@@ -204,7 +203,7 @@ func (w *Wishart) RandChol(c *mat64.Cholesky) *mat64.Cholesky {
 // setV computes and stores the covariance matrix of the distribution.
 func (w *Wishart) setV() {
 	w.once.Do(func() {
-		w.v = mat64.NewSymDense(w.dim, nil)
+		w.v = mat.NewSymDense(w.dim, nil)
 		w.v.FromCholesky(&w.cholv)
 	})
 }
@@ -10,39 +10,39 @@ import (
 	"testing"

 	"gonum.org/v1/gonum/floats"
-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 )

 func TestWishart(t *testing.T) {
 	for c, test := range []struct {
-		v   *mat64.SymDense
+		v   *mat.SymDense
 		nu  float64
-		xs  []*mat64.SymDense
+		xs  []*mat.SymDense
 		lps []float64
 	}{
 		// Logprob data compared with scipy.
 		{
-			v:  mat64.NewSymDense(2, []float64{1, 0, 0, 1}),
+			v:  mat.NewSymDense(2, []float64{1, 0, 0, 1}),
 			nu: 4,
-			xs: []*mat64.SymDense{
-				mat64.NewSymDense(2, []float64{0.9, 0.1, 0.1, 0.9}),
+			xs: []*mat.SymDense{
+				mat.NewSymDense(2, []float64{0.9, 0.1, 0.1, 0.9}),
 			},
 			lps: []float64{-4.2357432031863409},
 		},
 		{
-			v:  mat64.NewSymDense(2, []float64{0.8, -0.2, -0.2, 0.7}),
+			v:  mat.NewSymDense(2, []float64{0.8, -0.2, -0.2, 0.7}),
 			nu: 5,
-			xs: []*mat64.SymDense{
-				mat64.NewSymDense(2, []float64{0.9, 0.1, 0.1, 0.9}),
-				mat64.NewSymDense(2, []float64{0.3, -0.1, -0.1, 0.7}),
+			xs: []*mat.SymDense{
+				mat.NewSymDense(2, []float64{0.9, 0.1, 0.1, 0.9}),
+				mat.NewSymDense(2, []float64{0.3, -0.1, -0.1, 0.7}),
 			},
 			lps: []float64{-4.2476495605333575, -4.9993285370378633},
 		},
 		{
-			v:  mat64.NewSymDense(3, []float64{0.8, 0.3, 0.1, 0.3, 0.7, -0.1, 0.1, -0.1, 7}),
+			v:  mat.NewSymDense(3, []float64{0.8, 0.3, 0.1, 0.3, 0.7, -0.1, 0.1, -0.1, 7}),
 			nu: 5,
-			xs: []*mat64.SymDense{
-				mat64.NewSymDense(3, []float64{1, 0.2, -0.3, 0.2, 0.6, -0.2, -0.3, -0.2, 6}),
+			xs: []*mat.SymDense{
+				mat.NewSymDense(3, []float64{1, 0.2, -0.3, 0.2, 0.6, -0.2, -0.3, -0.2, 6}),
 			},
 			lps: []float64{-11.010982249229421},
 		},
@@ -54,7 +54,7 @@ func TestWishart(t *testing.T) {
 		for i, x := range test.xs {
 			lp := w.LogProbSym(x)

-			var chol mat64.Cholesky
+			var chol mat.Cholesky
 			ok := chol.Factorize(x)
 			if !ok {
 				panic("bad test")
@@ -80,25 +80,25 @@ func TestWishart(t *testing.T) {

 func TestWishartRand(t *testing.T) {
 	for c, test := range []struct {
-		v       *mat64.SymDense
+		v       *mat.SymDense
 		nu      float64
 		samples int
 		tol     float64
 	}{
 		{
-			v:       mat64.NewSymDense(2, []float64{0.8, -0.2, -0.2, 0.7}),
+			v:       mat.NewSymDense(2, []float64{0.8, -0.2, -0.2, 0.7}),
 			nu:      5,
 			samples: 30000,
 			tol:     3e-2,
 		},
 		{
-			v:       mat64.NewSymDense(3, []float64{0.8, 0.3, 0.1, 0.3, 0.7, -0.1, 0.1, -0.1, 7}),
+			v:       mat.NewSymDense(3, []float64{0.8, 0.3, 0.1, 0.3, 0.7, -0.1, 0.1, -0.1, 7}),
 			nu:      5,
 			samples: 300000,
 			tol:     3e-2,
 		},
 		{
-			v: mat64.NewSymDense(4, []float64{
+			v: mat.NewSymDense(4, []float64{
 				0.8, 0.3, 0.1, -0.2,
 				0.3, 0.7, -0.1, 0.4,
 				0.1, -0.1, 7, 1,
@@ -114,16 +114,16 @@ func TestWishartRand(t *testing.T) {
 		if !ok {
 			panic("bad test")
 		}
-		mean := mat64.NewSymDense(dim, nil)
-		x := mat64.NewSymDense(dim, nil)
+		mean := mat.NewSymDense(dim, nil)
+		x := mat.NewSymDense(dim, nil)
 		for i := 0; i < test.samples; i++ {
 			w.RandSym(x)
 			x.ScaleSym(1/float64(test.samples), x)
 			mean.AddSym(mean, x)
 		}
 		trueMean := w.MeanSym(nil)
-		if !mat64.EqualApprox(trueMean, mean, test.tol) {
-			t.Errorf("Case %d: Mismatch between estimated and true mean. Got\n%0.4v\nWant\n%0.4v\n", c, mat64.Formatted(mean), mat64.Formatted(trueMean))
+		if !mat.EqualApprox(trueMean, mean, test.tol) {
+			t.Errorf("Case %d: Mismatch between estimated and true mean. Got\n%0.4v\nWant\n%0.4v\n", c, mat.Formatted(mean), mat.Formatted(trueMean))
 		}
 	}
 }
@@ -9,7 +9,7 @@ import (
 	"math/rand"

 	"gonum.org/v1/gonum/floats"
-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 	"gonum.org/v1/gonum/stat/distuv"
 )

@@ -61,11 +61,11 @@ func NewDirichlet(alpha []float64, src *rand.Rand) *Dirichlet {
 //  covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])]
 // If the input matrix is nil a new matrix is allocated, otherwise the result
 // is stored in-place into the input.
-func (d *Dirichlet) CovarianceMatrix(cov *mat64.SymDense) *mat64.SymDense {
+func (d *Dirichlet) CovarianceMatrix(cov *mat.SymDense) *mat.SymDense {
 	if cov == nil {
-		cov = mat64.NewSymDense(d.Dim(), nil)
+		cov = mat.NewSymDense(d.Dim(), nil)
 	} else if cov.Symmetric() == 0 {
-		*cov = *(cov.GrowSquare(d.dim).(*mat64.SymDense))
+		*cov = *(cov.GrowSquare(d.dim).(*mat.SymDense))
 	} else if cov.Symmetric() != d.dim {
 		panic("normal: input matrix size mismatch")
 	}
@@ -9,7 +9,7 @@ import (
 	"math/rand"
 	"testing"

-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 )

 func TestDirichlet(t *testing.T) {
@@ -64,7 +64,7 @@ func TestDirichlet(t *testing.T) {
 	} {
 		d := test.Dir
 		dim := d.Dim()
-		x := mat64.NewDense(test.N, dim, nil)
+		x := mat.NewDense(test.N, dim, nil)
 		generateSamples(x, d)
 		checkMean(t, cas, x, d, 1e-3)
 		checkCov(t, cas, x, d, 1e-3)
@@ -9,7 +9,7 @@ import (
 	"testing"

 	"gonum.org/v1/gonum/floats"
-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 	"gonum.org/v1/gonum/stat"
 )

@@ -37,7 +37,7 @@ func testProbability(t *testing.T, cases []probCase) {
 	}
 }

-func generateSamples(x *mat64.Dense, r Rander) {
+func generateSamples(x *mat.Dense, r Rander) {
 	n, _ := x.Dims()
 	for i := 0; i < n; i++ {
 		r.Rand(x.RawRowView(i))
@@ -48,7 +48,7 @@ type Meaner interface {
 	Mean([]float64) []float64
 }

-func checkMean(t *testing.T, cas int, x *mat64.Dense, m Meaner, tol float64) {
+func checkMean(t *testing.T, cas int, x *mat.Dense, m Meaner, tol float64) {
 	mean := m.Mean(nil)

 	// Check that the answer is identical when using nil or non-nil.
@@ -63,7 +63,7 @@ func checkMean(t *testing.T, cas int, x *mat64.Dense, m Meaner, tol float64) {
 	col := make([]float64, r)
 	meanEst := make([]float64, len(mean))
 	for i := range meanEst {
-		meanEst[i] = stat.Mean(mat64.Col(col, i, x), nil)
+		meanEst[i] = stat.Mean(mat.Col(col, i, x), nil)
 	}
 	if !floats.EqualApprox(mean, meanEst, tol) {
 		t.Errorf("Returned mean and sample mean mismatch. Case %v. Empirical %v, returned %v", cas, meanEst, mean)
@@ -71,26 +71,26 @@ func checkMean(t *testing.T, cas int, x *mat64.Dense, m Meaner, tol float64) {
 }

 type Cover interface {
-	CovarianceMatrix(*mat64.SymDense) *mat64.SymDense
+	CovarianceMatrix(*mat.SymDense) *mat.SymDense
 }

-func checkCov(t *testing.T, cas int, x *mat64.Dense, c Cover, tol float64) {
+func checkCov(t *testing.T, cas int, x *mat.Dense, c Cover, tol float64) {
 	cov := c.CovarianceMatrix(nil)
 	n := cov.Symmetric()
-	cov2 := mat64.NewSymDense(n, nil)
+	cov2 := mat.NewSymDense(n, nil)
 	c.CovarianceMatrix(cov2)
-	if !mat64.Equal(cov, cov2) {
+	if !mat.Equal(cov, cov2) {
 		t.Errorf("Cov mismatch when providing nil and matrix. Case %v", cas)
 	}
-	var cov3 mat64.SymDense
+	var cov3 mat.SymDense
 	c.CovarianceMatrix(&cov3)
-	if !mat64.Equal(cov, &cov3) {
+	if !mat.Equal(cov, &cov3) {
 		t.Errorf("Cov mismatch when providing zero matrix. Case %v", cas)
 	}

 	// Check that the covariance matrix matches the samples
 	covEst := stat.CovarianceMatrix(nil, x, nil)
-	if !mat64.EqualApprox(covEst, cov, tol) {
-		t.Errorf("Return cov and sample cov mismatch. Cas %v.\nGot:\n%0.4v\nWant:\n%0.4v", cas, mat64.Formatted(cov), mat64.Formatted(covEst))
+	if !mat.EqualApprox(covEst, cov, tol) {
+		t.Errorf("Return cov and sample cov mismatch. Cas %v.\nGot:\n%0.4v\nWant:\n%0.4v", cas, mat.Formatted(cov), mat.Formatted(covEst))
 	}
 }
@@ -9,7 +9,7 @@ import (
 	"math/rand"

 	"gonum.org/v1/gonum/floats"
-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 	"gonum.org/v1/gonum/stat"
 	"gonum.org/v1/gonum/stat/distuv"
 )
@@ -26,10 +26,10 @@ var (
 type Normal struct {
 	mu []float64

-	sigma mat64.SymDense
+	sigma mat.SymDense

-	chol       mat64.Cholesky
-	lower      mat64.TriDense
+	chol       mat.Cholesky
+	lower      mat.TriDense
 	logSqrtDet float64
 	dim        int

@@ -39,7 +39,7 @@ type Normal struct {
 // NewNormal creates a new Normal with the given mean and covariance matrix.
 // NewNormal panics if len(mu) == 0, or if len(mu) != sigma.N. If the covariance
 // matrix is not positive-definite, the returned boolean is false.
-func NewNormal(mu []float64, sigma mat64.Symmetric, src *rand.Rand) (*Normal, bool) {
+func NewNormal(mu []float64, sigma mat.Symmetric, src *rand.Rand) (*Normal, bool) {
 	if len(mu) == 0 {
 		panic(badZeroDimension)
 	}
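A minimal usage sketch of the renamed constructor; the mean and covariance values are illustrative, and the nil source mirrors the calls in the tests further down:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
	"gonum.org/v1/gonum/stat/distmv"
)

func main() {
	mu := []float64{0, 0}
	sigma := mat.NewSymDense(2, []float64{1, 0.3, 0.3, 2})
	dist, ok := distmv.NewNormal(mu, sigma, nil)
	if !ok {
		panic("covariance matrix is not positive definite")
	}
	x := make([]float64, 2)
	dist.Rand(x) // draw one sample in place
	fmt.Println(x, dist.LogProb(x))
}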
@@ -57,7 +57,7 @@ func NewNormal(mu []float64, sigma mat64.Symmetric, src *rand.Rand) (*Normal, bo
 	if !ok {
 		return nil, false
 	}
-	n.sigma = *mat64.NewSymDense(dim, nil)
+	n.sigma = *mat.NewSymDense(dim, nil)
 	n.sigma.CopySym(sigma)
 	n.lower.LFromCholesky(&n.chol)
 	n.logSqrtDet = 0.5 * n.chol.LogDet()
@@ -67,7 +67,7 @@ func NewNormal(mu []float64, sigma mat64.Symmetric, src *rand.Rand) (*Normal, bo
 // NewNormalChol creates a new Normal distribution with the given mean and
 // covariance matrix represented by its Cholesky decomposition. NewNormalChol
 // panics if len(mu) is not equal to chol.Size().
-func NewNormalChol(mu []float64, chol *mat64.Cholesky, src *rand.Rand) *Normal {
+func NewNormalChol(mu []float64, chol *mat.Cholesky, src *rand.Rand) *Normal {
 	dim := len(mu)
 	if dim != chol.Size() {
 		panic(badSizeMismatch)
@@ -89,7 +89,7 @@ func NewNormalChol(mu []float64, chol *mat64.Cholesky, src *rand.Rand) *Normal {
 // panics if len(mu) is not equal to prec.Symmetric(). If the precision matrix
 // is not positive-definite, NewNormalPrecision returns nil for norm and false
 // for ok.
-func NewNormalPrecision(mu []float64, prec *mat64.SymDense, src *rand.Rand) (norm *Normal, ok bool) {
+func NewNormalPrecision(mu []float64, prec *mat.SymDense, src *rand.Rand) (norm *Normal, ok bool) {
 	if len(mu) == 0 {
 		panic(badZeroDimension)
 	}
@@ -102,12 +102,12 @@ func NewNormalPrecision(mu []float64, prec *mat64.SymDense, src *rand.Rand) (nor
 	// is much better, but this still loses precision. It is worth considering if
 	// instead the precision matrix should be stored explicitly and used instead
 	// of the Cholesky decomposition of the covariance matrix where appropriate.
-	var chol mat64.Cholesky
+	var chol mat.Cholesky
 	ok = chol.Factorize(prec)
 	if !ok {
 		return nil, false
 	}
-	var sigma mat64.SymDense
+	var sigma mat.SymDense
 	sigma.InverseCholesky(&chol)
 	return NewNormal(mu, &sigma, src)
 }
@@ -154,9 +154,9 @@ func (n *Normal) ConditionNormal(observed []int, values []float64, src *rand.Ran
 //  covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])]
 // If the input matrix is nil a new matrix is allocated, otherwise the result
 // is stored in-place into the input.
-func (n *Normal) CovarianceMatrix(s *mat64.SymDense) *mat64.SymDense {
+func (n *Normal) CovarianceMatrix(s *mat.SymDense) *mat.SymDense {
 	if s == nil {
-		s = mat64.NewSymDense(n.Dim(), nil)
+		s = mat.NewSymDense(n.Dim(), nil)
 	}
 	sn := s.Symmetric()
 	if sn != n.Dim() {
@@ -183,7 +183,7 @@ func (n *Normal) LogProb(x []float64) float64 {
 		panic(badSizeMismatch)
 	}
 	c := -0.5*float64(dim)*logTwoPi - n.logSqrtDet
-	dst := stat.Mahalanobis(mat64.NewVector(dim, x), mat64.NewVector(dim, n.mu), &n.chol)
+	dst := stat.Mahalanobis(mat.NewVector(dim, x), mat.NewVector(dim, n.mu), &n.chol)
 	return c - 0.5*dst*dst
 }

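For reference, the LogProb hunk above is the usual multivariate normal log-density written through the Mahalanobis distance,

\log p(x) = -\frac{d}{2}\log(2\pi) - \frac{1}{2}\log\lvert\Sigma\rvert - \frac{1}{2}D_M(x)^2,
\qquad D_M(x) = \sqrt{(x-\mu)^\mathsf{T}\Sigma^{-1}(x-\mu)},

with c collecting the first two terms and dst holding D_M(x).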
@@ -199,7 +199,7 @@ func (n *Normal) MarginalNormal(vars []int, src *rand.Rand) (*Normal, bool) {
 	for i, v := range vars {
 		newMean[i] = n.mu[v]
 	}
-	var s mat64.SymDense
+	var s mat.SymDense
 	s.SubsetSym(&n.sigma, vars)
 	return NewNormal(newMean, &s, src)
 }
@@ -308,8 +308,8 @@ func (n *Normal) TransformNormal(dst, normal []float64) []float64 {
 // transformNormal performs the same operation as TransformNormal except no
 // safety checks are performed and both input slices must be non-nil.
 func (n *Normal) transformNormal(dst, normal []float64) []float64 {
-	srcVec := mat64.NewVector(n.dim, normal)
-	dstVec := mat64.NewVector(n.dim, dst)
+	srcVec := mat.NewVector(n.dim, normal)
+	dstVec := mat.NewVector(n.dim, dst)
 	dstVec.MulVec(&n.lower, srcVec)
 	floats.Add(dst, n.mu)
 	return dst
@@ -10,24 +10,24 @@ import (
 	"testing"

 	"gonum.org/v1/gonum/floats"
-	"gonum.org/v1/gonum/matrix/mat64"
+	"gonum.org/v1/gonum/mat"
 	"gonum.org/v1/gonum/stat"
 )

 type mvTest struct {
 	Mu      []float64
-	Sigma   *mat64.SymDense
+	Sigma   *mat.SymDense
 	Loc     []float64
 	Logprob float64
 	Prob    float64
 }

 func TestNormProbs(t *testing.T) {
-	dist1, ok := NewNormal([]float64{0, 0}, mat64.NewSymDense(2, []float64{1, 0, 0, 1}), nil)
+	dist1, ok := NewNormal([]float64{0, 0}, mat.NewSymDense(2, []float64{1, 0, 0, 1}), nil)
 	if !ok {
 		t.Errorf("bad test")
 	}
-	dist2, ok := NewNormal([]float64{6, 7}, mat64.NewSymDense(2, []float64{8, 2, 0, 4}), nil)
+	dist2, ok := NewNormal([]float64{6, 7}, mat.NewSymDense(2, []float64{8, 2, 0, 4}), nil)
 	if !ok {
 		t.Errorf("bad test")
 	}
@@ -53,14 +53,14 @@ func TestNormProbs(t *testing.T) {
 func TestNewNormalChol(t *testing.T) {
 	for _, test := range []struct {
 		mean []float64
-		cov  *mat64.SymDense
+		cov  *mat.SymDense
 	}{
 		{
 			mean: []float64{2, 3},
-			cov:  mat64.NewSymDense(2, []float64{1, 0.1, 0.1, 1}),
+			cov:  mat.NewSymDense(2, []float64{1, 0.1, 0.1, 1}),
 		},
 	} {
-		var chol mat64.Cholesky
+		var chol mat.Cholesky
 		ok := chol.Factorize(test.cov)
 		if !ok {
 			panic("bad test")
@@ -101,26 +101,26 @@ func TestNormRand(t *testing.T) {
 		},
 	} {
 		dim := len(test.mean)
-		cov := mat64.NewSymDense(dim, test.cov)
+		cov := mat.NewSymDense(dim, test.cov)
 		n, ok := NewNormal(test.mean, cov, nil)
 		if !ok {
 			t.Errorf("bad covariance matrix")
 		}

 		nSamples := 1000000
-		samps := mat64.NewDense(nSamples, dim, nil)
+		samps := mat.NewDense(nSamples, dim, nil)
 		for i := 0; i < nSamples; i++ {
 			n.Rand(samps.RawRowView(i))
 		}
 		estMean := make([]float64, dim)
 		for i := range estMean {
-			estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil)
+			estMean[i] = stat.Mean(mat.Col(nil, i, samps), nil)
 		}
 		if !floats.EqualApprox(estMean, test.mean, 1e-2) {
 			t.Errorf("Mean mismatch: want: %v, got %v", test.mean, estMean)
 		}
 		estCov := stat.CovarianceMatrix(nil, samps, nil)
-		if !mat64.EqualApprox(estCov, cov, 1e-2) {
+		if !mat.EqualApprox(estCov, cov, 1e-2) {
 			t.Errorf("Cov mismatch: want: %v, got %v", cov, estCov)
 		}
 	}
@@ -140,7 +140,7 @@ func TestNormalQuantile(t *testing.T) {
 		},
 	} {
 		dim := len(test.mean)
-		cov := mat64.NewSymDense(dim, test.cov)
+		cov := mat.NewSymDense(dim, test.cov)
 		n, ok := NewNormal(test.mean, cov, nil)
 		if !ok {
 			t.Errorf("bad covariance matrix")
@@ -148,7 +148,7 @@ func TestNormalQuantile(t *testing.T) {

 		nSamples := 1000000
 		rnd := rand.New(rand.NewSource(1))
-		samps := mat64.NewDense(nSamples, dim, nil)
+		samps := mat.NewDense(nSamples, dim, nil)
 		tmp := make([]float64, dim)
 		for i := 0; i < nSamples; i++ {
 			for j := range tmp {
@@ -158,13 +158,13 @@ func TestNormalQuantile(t *testing.T) {
 		}
 		estMean := make([]float64, dim)
 		for i := range estMean {
-			estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil)
+			estMean[i] = stat.Mean(mat.Col(nil, i, samps), nil)
 		}
 		if !floats.EqualApprox(estMean, test.mean, 1e-2) {
 			t.Errorf("Mean mismatch: want: %v, got %v", test.mean, estMean)
 		}
 		estCov := stat.CovarianceMatrix(nil, samps, nil)
-		if !mat64.EqualApprox(estCov, cov, 1e-2) {
+		if !mat.EqualApprox(estCov, cov, 1e-2) {
|
||||||
t.Errorf("Cov mismatch: want: %v, got %v", cov, estCov)
|
t.Errorf("Cov mismatch: want: %v, got %v", cov, estCov)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -174,57 +174,57 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
// Uncorrelated values shouldn't influence the updated values.
|
// Uncorrelated values shouldn't influence the updated values.
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
mu []float64
|
mu []float64
|
||||||
sigma *mat64.SymDense
|
sigma *mat.SymDense
|
||||||
observed []int
|
observed []int
|
||||||
values []float64
|
values []float64
|
||||||
|
|
||||||
newMu []float64
|
newMu []float64
|
||||||
newSigma *mat64.SymDense
|
newSigma *mat.SymDense
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3},
|
mu: []float64{2, 3},
|
||||||
sigma: mat64.NewSymDense(2, []float64{2, 0, 0, 5}),
|
sigma: mat.NewSymDense(2, []float64{2, 0, 0, 5}),
|
||||||
observed: []int{0},
|
observed: []int{0},
|
||||||
values: []float64{10},
|
values: []float64{10},
|
||||||
|
|
||||||
newMu: []float64{3},
|
newMu: []float64{3},
|
||||||
newSigma: mat64.NewSymDense(1, []float64{5}),
|
newSigma: mat.NewSymDense(1, []float64{5}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3},
|
mu: []float64{2, 3},
|
||||||
sigma: mat64.NewSymDense(2, []float64{2, 0, 0, 5}),
|
sigma: mat.NewSymDense(2, []float64{2, 0, 0, 5}),
|
||||||
observed: []int{1},
|
observed: []int{1},
|
||||||
values: []float64{10},
|
values: []float64{10},
|
||||||
|
|
||||||
newMu: []float64{2},
|
newMu: []float64{2},
|
||||||
newSigma: mat64.NewSymDense(1, []float64{2}),
|
newSigma: mat.NewSymDense(1, []float64{2}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4},
|
mu: []float64{2, 3, 4},
|
||||||
sigma: mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
|
sigma: mat.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
|
||||||
observed: []int{1},
|
observed: []int{1},
|
||||||
values: []float64{10},
|
values: []float64{10},
|
||||||
|
|
||||||
newMu: []float64{2, 4},
|
newMu: []float64{2, 4},
|
||||||
newSigma: mat64.NewSymDense(2, []float64{2, 0, 0, 10}),
|
newSigma: mat.NewSymDense(2, []float64{2, 0, 0, 10}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4},
|
mu: []float64{2, 3, 4},
|
||||||
sigma: mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
|
sigma: mat.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
|
||||||
observed: []int{0, 1},
|
observed: []int{0, 1},
|
||||||
values: []float64{10, 15},
|
values: []float64{10, 15},
|
||||||
|
|
||||||
newMu: []float64{4},
|
newMu: []float64{4},
|
||||||
newSigma: mat64.NewSymDense(1, []float64{10}),
|
newSigma: mat.NewSymDense(1, []float64{10}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4, 5},
|
mu: []float64{2, 3, 4, 5},
|
||||||
sigma: mat64.NewSymDense(4, []float64{2, 0.5, 0, 0, 0.5, 5, 0, 0, 0, 0, 10, 2, 0, 0, 2, 3}),
|
sigma: mat.NewSymDense(4, []float64{2, 0.5, 0, 0, 0.5, 5, 0, 0, 0, 0, 10, 2, 0, 0, 2, 3}),
|
||||||
observed: []int{0, 1},
|
observed: []int{0, 1},
|
||||||
values: []float64{10, 15},
|
values: []float64{10, 15},
|
||||||
|
|
||||||
newMu: []float64{4, 5},
|
newMu: []float64{4, 5},
|
||||||
newSigma: mat64.NewSymDense(2, []float64{10, 2, 2, 3}),
|
newSigma: mat.NewSymDense(2, []float64{10, 2, 2, 3}),
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
normal, ok := NewNormal(test.mu, test.sigma, nil)
|
normal, ok := NewNormal(test.mu, test.sigma, nil)
|
||||||
@@ -240,9 +240,9 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
t.Errorf("Updated mean mismatch. Want %v, got %v.", test.newMu, newNormal.mu)
|
t.Errorf("Updated mean mismatch. Want %v, got %v.", test.newMu, newNormal.mu)
|
||||||
}
|
}
|
||||||
|
|
||||||
var sigma mat64.SymDense
|
var sigma mat.SymDense
|
||||||
sigma.FromCholesky(&newNormal.chol)
|
sigma.FromCholesky(&newNormal.chol)
|
||||||
if !mat64.EqualApprox(test.newSigma, &sigma, 1e-12) {
|
if !mat.EqualApprox(test.newSigma, &sigma, 1e-12) {
|
||||||
t.Errorf("Updated sigma mismatch\n.Want:\n% v\nGot:\n% v\n", test.newSigma, sigma)
|
t.Errorf("Updated sigma mismatch\n.Want:\n% v\nGot:\n% v\n", test.newSigma, sigma)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -269,7 +269,7 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
} {
|
} {
|
||||||
std := test.std
|
std := test.std
|
||||||
rho := test.rho
|
rho := test.rho
|
||||||
sigma := mat64.NewSymDense(2, []float64{std[0] * std[0], std[0] * std[1] * rho, std[0] * std[1] * rho, std[1] * std[1]})
|
sigma := mat.NewSymDense(2, []float64{std[0] * std[0], std[0] * std[1] * rho, std[0] * std[1] * rho, std[1] * std[1]})
|
||||||
normal, ok := NewNormal(test.mu, sigma, nil)
|
normal, ok := NewNormal(test.mu, sigma, nil)
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Fatalf("Bad test, original sigma not positive definite")
|
t.Fatalf("Bad test, original sigma not positive definite")
|
||||||
@@ -278,7 +278,7 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
if !ok {
|
if !ok {
|
||||||
t.Fatalf("Bad test, update failed")
|
t.Fatalf("Bad test, update failed")
|
||||||
}
|
}
|
||||||
var newSigma mat64.SymDense
|
var newSigma mat.SymDense
|
||||||
newSigma.FromCholesky(&newNormal.chol)
|
newSigma.FromCholesky(&newNormal.chol)
|
||||||
trueMean := test.mu[0] + rho*(std[0]/std[1])*(test.value-test.mu[1])
|
trueMean := test.mu[0] + rho*(std[0]/std[1])*(test.value-test.mu[1])
|
||||||
if math.Abs(trueMean-newNormal.mu[0]) > 1e-14 {
|
if math.Abs(trueMean-newNormal.mu[0]) > 1e-14 {
|
||||||
@@ -293,7 +293,7 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
// Test via sampling.
|
// Test via sampling.
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
mu []float64
|
mu []float64
|
||||||
sigma *mat64.SymDense
|
sigma *mat.SymDense
|
||||||
observed []int
|
observed []int
|
||||||
unobserved []int
|
unobserved []int
|
||||||
value []float64
|
value []float64
|
||||||
@@ -301,7 +301,7 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
// The indices in unobserved must be in ascending order for this test.
|
// The indices in unobserved must be in ascending order for this test.
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4},
|
mu: []float64{2, 3, 4},
|
||||||
sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
sigma: mat.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
||||||
|
|
||||||
observed: []int{0},
|
observed: []int{0},
|
||||||
unobserved: []int{1, 2},
|
unobserved: []int{1, 2},
|
||||||
@@ -309,7 +309,7 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4, 5},
|
mu: []float64{2, 3, 4, 5},
|
||||||
sigma: mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
|
sigma: mat.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
|
||||||
|
|
||||||
observed: []int{0, 3},
|
observed: []int{0, 3},
|
||||||
unobserved: []int{1, 2},
|
unobserved: []int{1, 2},
|
||||||
@@ -318,7 +318,7 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
} {
|
} {
|
||||||
totalSamp := 4000000
|
totalSamp := 4000000
|
||||||
var nSamp int
|
var nSamp int
|
||||||
samples := mat64.NewDense(totalSamp, len(test.mu), nil)
|
samples := mat.NewDense(totalSamp, len(test.mu), nil)
|
||||||
normal, ok := NewNormal(test.mu, test.sigma, nil)
|
normal, ok := NewNormal(test.mu, test.sigma, nil)
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Errorf("bad test")
|
t.Errorf("bad test")
|
||||||
@@ -343,12 +343,12 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
t.Errorf("bad test, not enough samples")
|
t.Errorf("bad test, not enough samples")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
samples = samples.View(0, 0, nSamp, len(test.mu)).(*mat64.Dense)
|
samples = samples.View(0, 0, nSamp, len(test.mu)).(*mat.Dense)
|
||||||
|
|
||||||
// Compute mean and covariance matrix.
|
// Compute mean and covariance matrix.
|
||||||
estMean := make([]float64, len(test.mu))
|
estMean := make([]float64, len(test.mu))
|
||||||
for i := range estMean {
|
for i := range estMean {
|
||||||
estMean[i] = stat.Mean(mat64.Col(nil, i, samples), nil)
|
estMean[i] = stat.Mean(mat.Col(nil, i, samples), nil)
|
||||||
}
|
}
|
||||||
estCov := stat.CovarianceMatrix(nil, samples, nil)
|
estCov := stat.CovarianceMatrix(nil, samples, nil)
|
||||||
|
|
||||||
@@ -363,7 +363,7 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
|
|
||||||
subEstMean = append(subEstMean, estMean[v])
|
subEstMean = append(subEstMean, estMean[v])
|
||||||
}
|
}
|
||||||
subEstCov := mat64.NewSymDense(len(test.unobserved), nil)
|
subEstCov := mat.NewSymDense(len(test.unobserved), nil)
|
||||||
for i := 0; i < len(test.unobserved); i++ {
|
for i := 0; i < len(test.unobserved); i++ {
|
||||||
for j := i; j < len(test.unobserved); j++ {
|
for j := i; j < len(test.unobserved); j++ {
|
||||||
subEstCov.SetSym(i, j, estCov.At(test.unobserved[i], test.unobserved[j]))
|
subEstCov.SetSym(i, j, estCov.At(test.unobserved[i], test.unobserved[j]))
|
||||||
@@ -375,9 +375,9 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
t.Errorf("Mean mismatch. Want %v, got %v.", newNormal.mu[i], v)
|
t.Errorf("Mean mismatch. Want %v, got %v.", newNormal.mu[i], v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var sigma mat64.SymDense
|
var sigma mat.SymDense
|
||||||
sigma.FromCholesky(&newNormal.chol)
|
sigma.FromCholesky(&newNormal.chol)
|
||||||
if !mat64.EqualApprox(&sigma, subEstCov, 1e-1) {
|
if !mat.EqualApprox(&sigma, subEstCov, 1e-1) {
|
||||||
t.Errorf("Covariance mismatch. Want:\n%0.8v\nGot:\n%0.8v\n", subEstCov, sigma)
|
t.Errorf("Covariance mismatch. Want:\n%0.8v\nGot:\n%0.8v\n", subEstCov, sigma)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -386,11 +386,11 @@ func TestConditionNormal(t *testing.T) {
|
|||||||
func TestCovarianceMatrix(t *testing.T) {
|
func TestCovarianceMatrix(t *testing.T) {
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
mu []float64
|
mu []float64
|
||||||
sigma *mat64.SymDense
|
sigma *mat.SymDense
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4},
|
mu: []float64{2, 3, 4},
|
||||||
sigma: mat64.NewSymDense(3, []float64{1, 0.5, 3, 0.5, 8, -1, 3, -1, 15}),
|
sigma: mat.NewSymDense(3, []float64{1, 0.5, 3, 0.5, 8, -1, 3, -1, 15}),
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
normal, ok := NewNormal(test.mu, test.sigma, nil)
|
normal, ok := NewNormal(test.mu, test.sigma, nil)
|
||||||
@@ -398,13 +398,13 @@ func TestCovarianceMatrix(t *testing.T) {
|
|||||||
t.Fatalf("Bad test, covariance matrix not positive definite")
|
t.Fatalf("Bad test, covariance matrix not positive definite")
|
||||||
}
|
}
|
||||||
cov := normal.CovarianceMatrix(nil)
|
cov := normal.CovarianceMatrix(nil)
|
||||||
if !mat64.EqualApprox(cov, test.sigma, 1e-14) {
|
if !mat.EqualApprox(cov, test.sigma, 1e-14) {
|
||||||
t.Errorf("Covariance mismatch with nil input")
|
t.Errorf("Covariance mismatch with nil input")
|
||||||
}
|
}
|
||||||
dim := test.sigma.Symmetric()
|
dim := test.sigma.Symmetric()
|
||||||
cov = mat64.NewSymDense(dim, nil)
|
cov = mat.NewSymDense(dim, nil)
|
||||||
normal.CovarianceMatrix(cov)
|
normal.CovarianceMatrix(cov)
|
||||||
if !mat64.EqualApprox(cov, test.sigma, 1e-14) {
|
if !mat.EqualApprox(cov, test.sigma, 1e-14) {
|
||||||
t.Errorf("Covariance mismatch with supplied input")
|
t.Errorf("Covariance mismatch with supplied input")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -413,22 +413,22 @@ func TestCovarianceMatrix(t *testing.T) {
|
|||||||
func TestMarginal(t *testing.T) {
|
func TestMarginal(t *testing.T) {
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
mu []float64
|
mu []float64
|
||||||
sigma *mat64.SymDense
|
sigma *mat.SymDense
|
||||||
marginal []int
|
marginal []int
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4},
|
mu: []float64{2, 3, 4},
|
||||||
sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
sigma: mat.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
||||||
marginal: []int{0},
|
marginal: []int{0},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4},
|
mu: []float64{2, 3, 4},
|
||||||
sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
sigma: mat.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
||||||
marginal: []int{0, 2},
|
marginal: []int{0, 2},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4, 5},
|
mu: []float64{2, 3, 4, 5},
|
||||||
sigma: mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
|
sigma: mat.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
|
||||||
|
|
||||||
marginal: []int{0, 3},
|
marginal: []int{0, 3},
|
||||||
},
|
},
|
||||||
@@ -443,13 +443,13 @@ func TestMarginal(t *testing.T) {
|
|||||||
}
|
}
|
||||||
dim := normal.Dim()
|
dim := normal.Dim()
|
||||||
nSamples := 1000000
|
nSamples := 1000000
|
||||||
samps := mat64.NewDense(nSamples, dim, nil)
|
samps := mat.NewDense(nSamples, dim, nil)
|
||||||
for i := 0; i < nSamples; i++ {
|
for i := 0; i < nSamples; i++ {
|
||||||
normal.Rand(samps.RawRowView(i))
|
normal.Rand(samps.RawRowView(i))
|
||||||
}
|
}
|
||||||
estMean := make([]float64, dim)
|
estMean := make([]float64, dim)
|
||||||
for i := range estMean {
|
for i := range estMean {
|
||||||
estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil)
|
estMean[i] = stat.Mean(mat.Col(nil, i, samps), nil)
|
||||||
}
|
}
|
||||||
for i, v := range test.marginal {
|
for i, v := range test.marginal {
|
||||||
if math.Abs(marginal.mu[i]-estMean[v]) > 1e-2 {
|
if math.Abs(marginal.mu[i]-estMean[v]) > 1e-2 {
|
||||||
@@ -474,15 +474,15 @@ func TestMarginal(t *testing.T) {
|
|||||||
func TestMarginalSingle(t *testing.T) {
|
func TestMarginalSingle(t *testing.T) {
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
mu []float64
|
mu []float64
|
||||||
sigma *mat64.SymDense
|
sigma *mat.SymDense
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4},
|
mu: []float64{2, 3, 4},
|
||||||
sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
sigma: mat.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4, 5},
|
mu: []float64{2, 3, 4, 5},
|
||||||
sigma: mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
|
sigma: mat.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
normal, ok := NewNormal(test.mu, test.sigma, nil)
|
normal, ok := NewNormal(test.mu, test.sigma, nil)
|
||||||
@@ -513,9 +513,9 @@ func TestMarginalSingle(t *testing.T) {
|
|||||||
for i := range x {
|
for i := range x {
|
||||||
x[i] = rnd.Float64()
|
x[i] = rnd.Float64()
|
||||||
}
|
}
|
||||||
mat := mat64.NewDense(dim, dim, x)
|
matrix := mat.NewDense(dim, dim, x)
|
||||||
var sigma mat64.SymDense
|
var sigma mat.SymDense
|
||||||
sigma.SymOuterK(1, mat)
|
sigma.SymOuterK(1, matrix)
|
||||||
|
|
||||||
normal, ok := NewNormal(mu, &sigma, nil)
|
normal, ok := NewNormal(mu, &sigma, nil)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@@ -9,7 +9,7 @@ import (
|
|||||||
"math/rand"
|
"math/rand"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"gonum.org/v1/gonum/matrix/mat64"
|
"gonum.org/v1/gonum/mat"
|
||||||
)
|
)
|
||||||
|
|
||||||
func BenchmarkMarginalNormal10(b *testing.B) {
|
func BenchmarkMarginalNormal10(b *testing.B) {
|
||||||
@@ -61,8 +61,8 @@ func randomNormal(sz int, rnd *rand.Rand) *Normal {
|
|||||||
for i := range data {
|
for i := range data {
|
||||||
data[i] = rnd.Float64()
|
data[i] = rnd.Float64()
|
||||||
}
|
}
|
||||||
dM := mat64.NewDense(sz, sz, data)
|
dM := mat.NewDense(sz, sz, data)
|
||||||
var sigma mat64.SymDense
|
var sigma mat.SymDense
|
||||||
sigma.SymOuterK(1, dM)
|
sigma.SymOuterK(1, dM)
|
||||||
|
|
||||||
normal, ok := NewNormal(mu, &sigma, nil)
|
normal, ok := NewNormal(mu, &sigma, nil)
|
||||||
|
@@ -8,7 +8,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"gonum.org/v1/gonum/floats"
|
"gonum.org/v1/gonum/floats"
|
||||||
"gonum.org/v1/gonum/matrix/mat64"
|
"gonum.org/v1/gonum/mat"
|
||||||
"gonum.org/v1/gonum/stat"
|
"gonum.org/v1/gonum/stat"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -37,14 +37,14 @@ func (Bhattacharyya) DistNormal(l, r *Normal) float64 {
|
|||||||
panic(badSizeMismatch)
|
panic(badSizeMismatch)
|
||||||
}
|
}
|
||||||
|
|
||||||
var sigma mat64.SymDense
|
var sigma mat.SymDense
|
||||||
sigma.AddSym(&l.sigma, &r.sigma)
|
sigma.AddSym(&l.sigma, &r.sigma)
|
||||||
sigma.ScaleSym(0.5, &sigma)
|
sigma.ScaleSym(0.5, &sigma)
|
||||||
|
|
||||||
var chol mat64.Cholesky
|
var chol mat.Cholesky
|
||||||
chol.Factorize(&sigma)
|
chol.Factorize(&sigma)
|
||||||
|
|
||||||
mahalanobis := stat.Mahalanobis(mat64.NewVector(dim, l.mu), mat64.NewVector(dim, r.mu), &chol)
|
mahalanobis := stat.Mahalanobis(mat.NewVector(dim, l.mu), mat.NewVector(dim, r.mu), &chol)
|
||||||
mahalanobisSq := mahalanobis * mahalanobis
|
mahalanobisSq := mahalanobis * mahalanobis
|
||||||
|
|
||||||
dl := l.chol.LogDet()
|
dl := l.chol.LogDet()
|
||||||
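
For context (the standard closed form, not spelled out in this hunk): the Bhattacharyya distance between N(μl, Σl) and N(μr, Σr) is m²/8 + ½·ln(det Σ / √(det Σl · det Σr)) with Σ = (Σl+Σr)/2 and m the Mahalanobis distance between the means under Σ, which is what the Cholesky factorisation and the stat.Mahalanobis call above set up. A 1-dimensional sanity check of that form (hypothetical helper; assumes "math" is imported):

// bhattacharyya1D evaluates the scalar case of the formula above, with
// v playing the role of the pooled covariance (Σl+Σr)/2.
func bhattacharyya1D(mul, varl, mur, varr float64) float64 {
	v := 0.5 * (varl + varr)
	m := mul - mur
	return m*m/(8*v) + 0.5*math.Log(v/math.Sqrt(varl*varr))
}
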
@@ -154,21 +154,21 @@ func (KullbackLeibler) DistNormal(l, r *Normal) float64 {
|
|||||||
panic(badSizeMismatch)
|
panic(badSizeMismatch)
|
||||||
}
|
}
|
||||||
|
|
||||||
mahalanobis := stat.Mahalanobis(mat64.NewVector(dim, l.mu), mat64.NewVector(dim, r.mu), &r.chol)
|
mahalanobis := stat.Mahalanobis(mat.NewVector(dim, l.mu), mat.NewVector(dim, r.mu), &r.chol)
|
||||||
mahalanobisSq := mahalanobis * mahalanobis
|
mahalanobisSq := mahalanobis * mahalanobis
|
||||||
|
|
||||||
// TODO(btracey): Optimize where there is a SolveCholeskySym
|
// TODO(btracey): Optimize where there is a SolveCholeskySym
|
||||||
// TODO(btracey): There may be a more efficient way to just compute the trace
|
// TODO(btracey): There may be a more efficient way to just compute the trace
|
||||||
// Compute tr(Σ_r^-1*Σ_l) using the fact that Σ_l = U^T * U
|
// Compute tr(Σ_r^-1*Σ_l) using the fact that Σ_l = U^T * U
|
||||||
var u mat64.TriDense
|
var u mat.TriDense
|
||||||
u.UFromCholesky(&l.chol)
|
u.UFromCholesky(&l.chol)
|
||||||
var m mat64.Dense
|
var m mat.Dense
|
||||||
err := m.SolveCholesky(&r.chol, u.T())
|
err := m.SolveCholesky(&r.chol, u.T())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return math.NaN()
|
return math.NaN()
|
||||||
}
|
}
|
||||||
m.Mul(&m, &u)
|
m.Mul(&m, &u)
|
||||||
tr := mat64.Trace(&m)
|
tr := mat.Trace(&m)
|
||||||
|
|
||||||
return r.logSqrtDet - l.logSqrtDet + 0.5*(mahalanobisSq+tr-float64(l.dim))
|
return r.logSqrtDet - l.logSqrtDet + 0.5*(mahalanobisSq+tr-float64(l.dim))
|
||||||
}
|
}
|
||||||
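
The return statement above is the standard Gaussian KL divergence, KL(l‖r) = ½·(tr(Σr⁻¹·Σl) + m² − dim + ln(det Σr / det Σl)), with m the Mahalanobis distance between the means under Σr; the r.logSqrtDet − l.logSqrtDet term supplies the ½·ln determinant ratio. A 1-dimensional check of the same closed form (hypothetical helper; assumes "math" is imported):

// kl1D is KL(N(mul, varl) ‖ N(mur, varr)) in one dimension.
func kl1D(mul, varl, mur, varr float64) float64 {
	return 0.5*math.Log(varr/varl) + (varl+(mul-mur)*(mul-mur))/(2*varr) - 0.5
}
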
@@ -233,20 +233,20 @@ func (Wasserstein) DistNormal(l, r *Normal) float64 {
|
|||||||
d = d * d
|
d = d * d
|
||||||
|
|
||||||
// Compute Σ_l^(1/2)
|
// Compute Σ_l^(1/2)
|
||||||
var ssl mat64.SymDense
|
var ssl mat.SymDense
|
||||||
ssl.PowPSD(&l.sigma, 0.5)
|
ssl.PowPSD(&l.sigma, 0.5)
|
||||||
// Compute Σ_l^(1/2)*Σ_r*Σ_l^(1/2)
|
// Compute Σ_l^(1/2)*Σ_r*Σ_l^(1/2)
|
||||||
var mean mat64.Dense
|
var mean mat.Dense
|
||||||
mean.Mul(&ssl, &r.sigma)
|
mean.Mul(&ssl, &r.sigma)
|
||||||
mean.Mul(&mean, &ssl)
|
mean.Mul(&mean, &ssl)
|
||||||
|
|
||||||
// Reinterpret as symdense, and take Σ^(1/2)
|
// Reinterpret as symdense, and take Σ^(1/2)
|
||||||
meanSym := mat64.NewSymDense(dim, mean.RawMatrix().Data)
|
meanSym := mat.NewSymDense(dim, mean.RawMatrix().Data)
|
||||||
ssl.PowPSD(meanSym, 0.5)
|
ssl.PowPSD(meanSym, 0.5)
|
||||||
|
|
||||||
tr := mat64.Trace(&r.sigma)
|
tr := mat.Trace(&r.sigma)
|
||||||
tl := mat64.Trace(&l.sigma)
|
tl := mat.Trace(&l.sigma)
|
||||||
tm := mat64.Trace(&ssl)
|
tm := mat.Trace(&ssl)
|
||||||
|
|
||||||
return d + tl + tr - 2*tm
|
return d + tl + tr - 2*tm
|
||||||
}
|
}
|
||||||
|
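
The Wasserstein code above implements the standard closed form for Gaussians, W₂² = ‖μl−μr‖² + tr(Σl) + tr(Σr) − 2·tr((Σl^(1/2)·Σr·Σl^(1/2))^(1/2)), which is why the matrix square roots are taken via PowPSD. In one dimension this collapses to (μl−μr)² + (σl−σr)²; a tiny check of that case (hypothetical helper, not part of this commit):

// wasserstein1D is the squared 2-Wasserstein distance between two
// univariate normals given their means and standard deviations.
func wasserstein1D(mul, sdl, mur, sdr float64) float64 {
	dm := mul - mur
	ds := sdl - sdr
	return dm*dm + ds*ds
}
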
@@ -10,21 +10,21 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"gonum.org/v1/gonum/floats"
|
"gonum.org/v1/gonum/floats"
|
||||||
"gonum.org/v1/gonum/matrix/mat64"
|
"gonum.org/v1/gonum/mat"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBhattacharyyaNormal(t *testing.T) {
|
func TestBhattacharyyaNormal(t *testing.T) {
|
||||||
for cas, test := range []struct {
|
for cas, test := range []struct {
|
||||||
am, bm []float64
|
am, bm []float64
|
||||||
ac, bc *mat64.SymDense
|
ac, bc *mat.SymDense
|
||||||
samples int
|
samples int
|
||||||
tol float64
|
tol float64
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
am: []float64{2, 3},
|
am: []float64{2, 3},
|
||||||
ac: mat64.NewSymDense(2, []float64{3, -1, -1, 2}),
|
ac: mat.NewSymDense(2, []float64{3, -1, -1, 2}),
|
||||||
bm: []float64{-1, 1},
|
bm: []float64{-1, 1},
|
||||||
bc: mat64.NewSymDense(2, []float64{1.5, 0.2, 0.2, 0.9}),
|
bc: mat.NewSymDense(2, []float64{1.5, 0.2, 0.2, 0.9}),
|
||||||
samples: 100000,
|
samples: 100000,
|
||||||
tol: 1e-2,
|
tol: 1e-2,
|
||||||
},
|
},
|
||||||
@@ -105,15 +105,15 @@ func bhattacharyyaSample(dim, samples int, l RandLogProber, r LogProber) float64
|
|||||||
func TestCrossEntropyNormal(t *testing.T) {
|
func TestCrossEntropyNormal(t *testing.T) {
|
||||||
for cas, test := range []struct {
|
for cas, test := range []struct {
|
||||||
am, bm []float64
|
am, bm []float64
|
||||||
ac, bc *mat64.SymDense
|
ac, bc *mat.SymDense
|
||||||
samples int
|
samples int
|
||||||
tol float64
|
tol float64
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
am: []float64{2, 3},
|
am: []float64{2, 3},
|
||||||
ac: mat64.NewSymDense(2, []float64{3, -1, -1, 2}),
|
ac: mat.NewSymDense(2, []float64{3, -1, -1, 2}),
|
||||||
bm: []float64{-1, 1},
|
bm: []float64{-1, 1},
|
||||||
bc: mat64.NewSymDense(2, []float64{1.5, 0.2, 0.2, 0.9}),
|
bc: mat.NewSymDense(2, []float64{1.5, 0.2, 0.2, 0.9}),
|
||||||
samples: 100000,
|
samples: 100000,
|
||||||
tol: 1e-2,
|
tol: 1e-2,
|
||||||
},
|
},
|
||||||
@@ -144,15 +144,15 @@ func TestCrossEntropyNormal(t *testing.T) {
|
|||||||
func TestHellingerNormal(t *testing.T) {
|
func TestHellingerNormal(t *testing.T) {
|
||||||
for cas, test := range []struct {
|
for cas, test := range []struct {
|
||||||
am, bm []float64
|
am, bm []float64
|
||||||
ac, bc *mat64.SymDense
|
ac, bc *mat.SymDense
|
||||||
samples int
|
samples int
|
||||||
tol float64
|
tol float64
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
am: []float64{2, 3},
|
am: []float64{2, 3},
|
||||||
ac: mat64.NewSymDense(2, []float64{3, -1, -1, 2}),
|
ac: mat.NewSymDense(2, []float64{3, -1, -1, 2}),
|
||||||
bm: []float64{-1, 1},
|
bm: []float64{-1, 1},
|
||||||
bc: mat64.NewSymDense(2, []float64{1.5, 0.2, 0.2, 0.9}),
|
bc: mat.NewSymDense(2, []float64{1.5, 0.2, 0.2, 0.9}),
|
||||||
samples: 100000,
|
samples: 100000,
|
||||||
tol: 5e-1,
|
tol: 5e-1,
|
||||||
},
|
},
|
||||||
@@ -188,15 +188,15 @@ func TestHellingerNormal(t *testing.T) {
|
|||||||
func TestKullbackLeiblerNormal(t *testing.T) {
|
func TestKullbackLeiblerNormal(t *testing.T) {
|
||||||
for cas, test := range []struct {
|
for cas, test := range []struct {
|
||||||
am, bm []float64
|
am, bm []float64
|
||||||
ac, bc *mat64.SymDense
|
ac, bc *mat.SymDense
|
||||||
samples int
|
samples int
|
||||||
tol float64
|
tol float64
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
am: []float64{2, 3},
|
am: []float64{2, 3},
|
||||||
ac: mat64.NewSymDense(2, []float64{3, -1, -1, 2}),
|
ac: mat.NewSymDense(2, []float64{3, -1, -1, 2}),
|
||||||
bm: []float64{-1, 1},
|
bm: []float64{-1, 1},
|
||||||
bc: mat64.NewSymDense(2, []float64{1.5, 0.2, 0.2, 0.9}),
|
bc: mat.NewSymDense(2, []float64{1.5, 0.2, 0.2, 0.9}),
|
||||||
samples: 10000,
|
samples: 10000,
|
||||||
tol: 1e-2,
|
tol: 1e-2,
|
||||||
},
|
},
|
||||||
|
@@ -12,7 +12,7 @@ import (
|
|||||||
"golang.org/x/tools/container/intsets"
|
"golang.org/x/tools/container/intsets"
|
||||||
|
|
||||||
"gonum.org/v1/gonum/floats"
|
"gonum.org/v1/gonum/floats"
|
||||||
"gonum.org/v1/gonum/matrix/mat64"
|
"gonum.org/v1/gonum/mat"
|
||||||
"gonum.org/v1/gonum/stat/distuv"
|
"gonum.org/v1/gonum/stat/distuv"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -35,10 +35,10 @@ type StudentsT struct {
|
|||||||
mu []float64
|
mu []float64
|
||||||
src *rand.Rand
|
src *rand.Rand
|
||||||
|
|
||||||
sigma mat64.SymDense // only stored if needed
|
sigma mat.SymDense // only stored if needed
|
||||||
|
|
||||||
chol mat64.Cholesky
|
chol mat.Cholesky
|
||||||
lower mat64.TriDense
|
lower mat.TriDense
|
||||||
logSqrtDet float64
|
logSqrtDet float64
|
||||||
dim int
|
dim int
|
||||||
}
|
}
|
||||||
@@ -48,7 +48,7 @@ type StudentsT struct {
|
|||||||
//
|
//
|
||||||
// NewStudentsT panics if len(mu) == 0, or if len(mu) != sigma.Symmetric(). If
|
// NewStudentsT panics if len(mu) == 0, or if len(mu) != sigma.Symmetric(). If
|
||||||
// the covariance matrix is not positive-definite, nil is returned and ok is false.
|
// the covariance matrix is not positive-definite, nil is returned and ok is false.
|
||||||
func NewStudentsT(mu []float64, sigma mat64.Symmetric, nu float64, src *rand.Rand) (dist *StudentsT, ok bool) {
|
func NewStudentsT(mu []float64, sigma mat.Symmetric, nu float64, src *rand.Rand) (dist *StudentsT, ok bool) {
|
||||||
if len(mu) == 0 {
|
if len(mu) == 0 {
|
||||||
panic(badZeroDimension)
|
panic(badZeroDimension)
|
||||||
}
|
}
|
||||||
@@ -69,7 +69,7 @@ func NewStudentsT(mu []float64, sigma mat64.Symmetric, nu float64, src *rand.Ran
|
|||||||
if !ok {
|
if !ok {
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
s.sigma = *mat64.NewSymDense(dim, nil)
|
s.sigma = *mat.NewSymDense(dim, nil)
|
||||||
s.sigma.CopySym(sigma)
|
s.sigma.CopySym(sigma)
|
||||||
s.lower.LFromCholesky(&s.chol)
|
s.lower.LFromCholesky(&s.chol)
|
||||||
s.logSqrtDet = 0.5 * s.chol.LogDet()
|
s.logSqrtDet = 0.5 * s.chol.LogDet()
|
||||||
@@ -113,7 +113,7 @@ func (s *StudentsT) ConditionStudentsT(observed []int, values []float64, src *ra
|
|||||||
// studentsTConditional updates a Student's T distribution based on the observed samples
|
// studentsTConditional updates a Student's T distribution based on the observed samples
|
||||||
// (see documentation for the public function). The Gaussian conditional update
|
// (see documentation for the public function). The Gaussian conditional update
|
||||||
// is treated as a special case when nu == math.Inf(1).
|
// is treated as a special case when nu == math.Inf(1).
|
||||||
func studentsTConditional(observed []int, values []float64, nu float64, mu []float64, sigma mat64.Symmetric) (newNu float64, newMean []float64, newSigma *mat64.SymDense) {
|
func studentsTConditional(observed []int, values []float64, nu float64, mu []float64, sigma mat.Symmetric) (newNu float64, newMean []float64, newSigma *mat.SymDense) {
|
||||||
dim := len(mu)
|
dim := len(mu)
|
||||||
ob := len(observed)
|
ob := len(observed)
|
||||||
|
|
||||||
@@ -133,11 +133,11 @@ func studentsTConditional(observed []int, values []float64, nu float64, mu []flo
|
|||||||
mu2[i] = values[i] - mu[v]
|
mu2[i] = values[i] - mu[v]
|
||||||
}
|
}
|
||||||
|
|
||||||
var sigma11, sigma22 mat64.SymDense
|
var sigma11, sigma22 mat.SymDense
|
||||||
sigma11.SubsetSym(sigma, unobserved)
|
sigma11.SubsetSym(sigma, unobserved)
|
||||||
sigma22.SubsetSym(sigma, observed)
|
sigma22.SubsetSym(sigma, observed)
|
||||||
|
|
||||||
sigma21 := mat64.NewDense(ob, unob, nil)
|
sigma21 := mat.NewDense(ob, unob, nil)
|
||||||
for i, r := range observed {
|
for i, r := range observed {
|
||||||
for j, c := range unobserved {
|
for j, c := range unobserved {
|
||||||
v := sigma.At(r, c)
|
v := sigma.At(r, c)
|
||||||
@@ -145,15 +145,15 @@ func studentsTConditional(observed []int, values []float64, nu float64, mu []flo
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var chol mat64.Cholesky
|
var chol mat.Cholesky
|
||||||
ok := chol.Factorize(&sigma22)
|
ok := chol.Factorize(&sigma22)
|
||||||
if !ok {
|
if !ok {
|
||||||
return math.NaN(), nil, nil
|
return math.NaN(), nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compute mu_1 + sigma_{2,1}^T * sigma_{2,2}^-1 (v - mu_2).
|
// Compute mu_1 + sigma_{2,1}^T * sigma_{2,2}^-1 (v - mu_2).
|
||||||
v := mat64.NewVector(ob, mu2)
|
v := mat.NewVector(ob, mu2)
|
||||||
var tmp, tmp2 mat64.Vector
|
var tmp, tmp2 mat.Vector
|
||||||
err := tmp.SolveCholeskyVec(&chol, v)
|
err := tmp.SolveCholeskyVec(&chol, v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return math.NaN(), nil, nil
|
return math.NaN(), nil, nil
|
||||||
@@ -166,7 +166,7 @@ func studentsTConditional(observed []int, values []float64, nu float64, mu []flo
|
|||||||
|
|
||||||
// Compute tmp4 = sigma_{2,1}^T * sigma_{2,2}^-1 * sigma_{2,1}.
|
// Compute tmp4 = sigma_{2,1}^T * sigma_{2,2}^-1 * sigma_{2,1}.
|
||||||
// TODO(btracey): Should this be a method of SymDense?
|
// TODO(btracey): Should this be a method of SymDense?
|
||||||
var tmp3, tmp4 mat64.Dense
|
var tmp3, tmp4 mat.Dense
|
||||||
err = tmp3.SolveCholesky(&chol, sigma21)
|
err = tmp3.SolveCholesky(&chol, sigma21)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return math.NaN(), nil, nil
|
return math.NaN(), nil, nil
|
||||||
@@ -189,7 +189,7 @@ func studentsTConditional(observed []int, values []float64, nu float64, mu []flo
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Compute beta = (v - mu_2)^T * sigma_{2,2}^-1 * (v - mu_2).
|
// Compute beta = (v - mu_2)^T * sigma_{2,2}^-1 * (v - mu_2).
// Compute beta = (v - mu_2)^T * sigma_{2,2}^-1 * (v - mu_2).
|
// Compute beta = (v - mu_2)^T * sigma_{2,2}^-1 * (v - mu_2).
|
||||||
beta := mat64.Dot(v, &tmp)
|
beta := mat.Dot(v, &tmp)
|
||||||
|
|
||||||
// Scale the covariance matrix
|
// Scale the covariance matrix
|
||||||
sigma11.ScaleSym((nu+beta)/(nu+float64(ob)), &sigma11)
|
sigma11.ScaleSym((nu+beta)/(nu+float64(ob)), &sigma11)
|
||||||
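
For orientation, the scaling at the end of this hunk matches the textbook conditional update for a multivariate Student's t (a standard result, stated here as context rather than quoted from the code): with n₂ observed entries taking values v, the conditional distribution has ν' = ν + n₂, mean μ₁ + Σ₁₂·Σ₂₂⁻¹·(v − μ₂), and covariance ((ν + β)/(ν + n₂))·(Σ₁₁ − Σ₁₂·Σ₂₂⁻¹·Σ₂₁), where β = (v − μ₂)ᵀ·Σ₂₂⁻¹·(v − μ₂) is the quantity computed as mat.Dot(v, &tmp) earlier in this hunk; the Schur-complement subtraction itself falls outside the lines shown.
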
@@ -221,9 +221,9 @@ func findUnob(observed []int, dim int) (unobserved []int) {
|
|||||||
// covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])]
|
// covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])]
|
||||||
// If the input matrix is nil a new matrix is allocated, otherwise the result
|
// If the input matrix is nil a new matrix is allocated, otherwise the result
|
||||||
// is stored in-place into the input.
|
// is stored in-place into the input.
|
||||||
func (st *StudentsT) CovarianceMatrix(s *mat64.SymDense) *mat64.SymDense {
|
func (st *StudentsT) CovarianceMatrix(s *mat.SymDense) *mat.SymDense {
|
||||||
if s == nil {
|
if s == nil {
|
||||||
s = mat64.NewSymDense(st.dim, nil)
|
s = mat.NewSymDense(st.dim, nil)
|
||||||
}
|
}
|
||||||
sn := s.Symmetric()
|
sn := s.Symmetric()
|
||||||
if sn != st.dim {
|
if sn != st.dim {
|
||||||
@@ -256,12 +256,12 @@ func (s *StudentsT) LogProb(y []float64) float64 {
|
|||||||
copy(shift, y)
|
copy(shift, y)
|
||||||
floats.Sub(shift, s.mu)
|
floats.Sub(shift, s.mu)
|
||||||
|
|
||||||
x := mat64.NewVector(s.dim, shift)
|
x := mat.NewVector(s.dim, shift)
|
||||||
|
|
||||||
var tmp mat64.Vector
|
var tmp mat.Vector
|
||||||
tmp.SolveCholeskyVec(&s.chol, x)
|
tmp.SolveCholeskyVec(&s.chol, x)
|
||||||
|
|
||||||
dot := mat64.Dot(&tmp, x)
|
dot := mat.Dot(&tmp, x)
|
||||||
|
|
||||||
return t1 - ((nu+n)/2)*math.Log(1+dot/nu)
|
return t1 - ((nu+n)/2)*math.Log(1+dot/nu)
|
||||||
}
|
}
|
||||||
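
The density evaluated above is the standard multivariate Student's t log-density, log p(y) = t1 − ((ν+n)/2)·ln(1 + δ/ν) with δ = (y−μ)ᵀ·Σ⁻¹·(y−μ), where t1 is set earlier in the function (outside this hunk) and should collect the usual normalising terms lnΓ((ν+n)/2) − lnΓ(ν/2) − (n/2)·ln(νπ) − ½·ln|Σ|. A 1-dimensional check of that form (hypothetical helper; assumes "math" is imported):

// tLogPDF1D is the log-density of a univariate location-scale Student's t
// with location mu, squared scale sigma2, and nu degrees of freedom.
func tLogPDF1D(y, mu, sigma2, nu float64) float64 {
	lg := func(a float64) float64 {
		v, _ := math.Lgamma(a)
		return v
	}
	delta := (y - mu) * (y - mu) / sigma2
	return lg((nu+1)/2) - lg(nu/2) - 0.5*math.Log(nu*math.Pi) -
		0.5*math.Log(sigma2) - ((nu+1)/2)*math.Log(1+delta/nu)
}
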
@@ -283,7 +283,7 @@ func (s *StudentsT) MarginalStudentsT(vars []int, src *rand.Rand) (dist *Student
|
|||||||
for i, v := range vars {
|
for i, v := range vars {
|
||||||
newMean[i] = s.mu[v]
|
newMean[i] = s.mu[v]
|
||||||
}
|
}
|
||||||
var newSigma mat64.SymDense
|
var newSigma mat.SymDense
|
||||||
newSigma.SubsetSym(&s.sigma, vars)
|
newSigma.SubsetSym(&s.sigma, vars)
|
||||||
return NewStudentsT(newMean, &newSigma, s.nu, src)
|
return NewStudentsT(newMean, &newSigma, s.nu, src)
|
||||||
}
|
}
|
||||||
@@ -342,8 +342,8 @@ func (s *StudentsT) Rand(x []float64) []float64 {
|
|||||||
tmp[i] = s.src.NormFloat64()
|
tmp[i] = s.src.NormFloat64()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
xVec := mat64.NewVector(s.dim, x)
|
xVec := mat.NewVector(s.dim, x)
|
||||||
tmpVec := mat64.NewVector(s.dim, tmp)
|
tmpVec := mat.NewVector(s.dim, tmp)
|
||||||
xVec.MulVec(&s.lower, tmpVec)
|
xVec.MulVec(&s.lower, tmpVec)
|
||||||
|
|
||||||
u := distuv.ChiSquared{K: s.nu, Src: s.src}.Rand()
|
u := distuv.ChiSquared{K: s.nu, Src: s.src}.Rand()
|
||||||
|
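
The sampling in this hunk follows the standard construction of a multivariate t draw: with z a vector of standard normals and u ~ χ²(ν), x = μ + √(ν/u)·L·z, where L is the lower Cholesky factor of Σ. The lines shown compute the L·z product and the χ² draw; the √(ν/u) rescaling and the shift by μ happen after the last line of this hunk, so this description is the standard recipe rather than a quote of the remaining code.
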
@@ -10,7 +10,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"gonum.org/v1/gonum/floats"
|
"gonum.org/v1/gonum/floats"
|
||||||
"gonum.org/v1/gonum/matrix/mat64"
|
"gonum.org/v1/gonum/mat"
|
||||||
"gonum.org/v1/gonum/stat"
|
"gonum.org/v1/gonum/stat"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -19,7 +19,7 @@ func TestStudentTProbs(t *testing.T) {
|
|||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
nu float64
|
nu float64
|
||||||
mu []float64
|
mu []float64
|
||||||
sigma *mat64.SymDense
|
sigma *mat.SymDense
|
||||||
|
|
||||||
x [][]float64
|
x [][]float64
|
||||||
probs []float64
|
probs []float64
|
||||||
@@ -27,7 +27,7 @@ func TestStudentTProbs(t *testing.T) {
|
|||||||
{
|
{
|
||||||
nu: 3,
|
nu: 3,
|
||||||
mu: []float64{0, 0},
|
mu: []float64{0, 0},
|
||||||
sigma: mat64.NewSymDense(2, []float64{1, 0, 0, 1}),
|
sigma: mat.NewSymDense(2, []float64{1, 0, 0, 1}),
|
||||||
|
|
||||||
x: [][]float64{
|
x: [][]float64{
|
||||||
{0, 0},
|
{0, 0},
|
||||||
@@ -46,7 +46,7 @@ func TestStudentTProbs(t *testing.T) {
|
|||||||
{
|
{
|
||||||
nu: 4,
|
nu: 4,
|
||||||
mu: []float64{2, -3},
|
mu: []float64{2, -3},
|
||||||
sigma: mat64.NewSymDense(2, []float64{8, -1, -1, 5}),
|
sigma: mat.NewSymDense(2, []float64{8, -1, -1, 5}),
|
||||||
|
|
||||||
x: [][]float64{
|
x: [][]float64{
|
||||||
{0, 0},
|
{0, 0},
|
||||||
@@ -87,25 +87,25 @@ func TestStudentsTRand(t *testing.T) {
|
|||||||
src := rand.New(rand.NewSource(1))
|
src := rand.New(rand.NewSource(1))
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
mean []float64
|
mean []float64
|
||||||
cov *mat64.SymDense
|
cov *mat.SymDense
|
||||||
nu float64
|
nu float64
|
||||||
tolcov float64
|
tolcov float64
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
mean: []float64{0, 0},
|
mean: []float64{0, 0},
|
||||||
cov: mat64.NewSymDense(2, []float64{1, 0, 0, 1}),
|
cov: mat.NewSymDense(2, []float64{1, 0, 0, 1}),
|
||||||
nu: 3,
|
nu: 3,
|
||||||
tolcov: 1e-2,
|
tolcov: 1e-2,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mean: []float64{3, 4},
|
mean: []float64{3, 4},
|
||||||
cov: mat64.NewSymDense(2, []float64{5, 1.2, 1.2, 6}),
|
cov: mat.NewSymDense(2, []float64{5, 1.2, 1.2, 6}),
|
||||||
nu: 8,
|
nu: 8,
|
||||||
tolcov: 1e-2,
|
tolcov: 1e-2,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mean: []float64{3, 4, -2},
|
mean: []float64{3, 4, -2},
|
||||||
cov: mat64.NewSymDense(3, []float64{5, 1.2, -0.8, 1.2, 6, 0.4, -0.8, 0.4, 2}),
|
cov: mat.NewSymDense(3, []float64{5, 1.2, -0.8, 1.2, 6, 0.4, -0.8, 0.4, 2}),
|
||||||
nu: 8,
|
nu: 8,
|
||||||
tolcov: 1e-2,
|
tolcov: 1e-2,
|
||||||
},
|
},
|
||||||
@@ -116,13 +116,13 @@ func TestStudentsTRand(t *testing.T) {
|
|||||||
}
|
}
|
||||||
nSamples := 10000000
|
nSamples := 10000000
|
||||||
dim := len(test.mean)
|
dim := len(test.mean)
|
||||||
samps := mat64.NewDense(nSamples, dim, nil)
|
samps := mat.NewDense(nSamples, dim, nil)
|
||||||
for i := 0; i < nSamples; i++ {
|
for i := 0; i < nSamples; i++ {
|
||||||
s.Rand(samps.RawRowView(i))
|
s.Rand(samps.RawRowView(i))
|
||||||
}
|
}
|
||||||
estMean := make([]float64, dim)
|
estMean := make([]float64, dim)
|
||||||
for i := range estMean {
|
for i := range estMean {
|
||||||
estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil)
|
estMean[i] = stat.Mean(mat.Col(nil, i, samps), nil)
|
||||||
}
|
}
|
||||||
mean := s.Mean(nil)
|
mean := s.Mean(nil)
|
||||||
if !floats.EqualApprox(estMean, mean, 1e-2) {
|
if !floats.EqualApprox(estMean, mean, 1e-2) {
|
||||||
@@ -130,7 +130,7 @@ func TestStudentsTRand(t *testing.T) {
|
|||||||
}
|
}
|
||||||
cov := s.CovarianceMatrix(nil)
|
cov := s.CovarianceMatrix(nil)
|
||||||
estCov := stat.CovarianceMatrix(nil, samps, nil)
|
estCov := stat.CovarianceMatrix(nil, samps, nil)
|
||||||
if !mat64.EqualApprox(estCov, cov, test.tolcov) {
|
if !mat.EqualApprox(estCov, cov, test.tolcov) {
|
||||||
t.Errorf("Cov mismatch: want: %v, got %v", cov, estCov)
|
t.Errorf("Cov mismatch: want: %v, got %v", cov, estCov)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -140,7 +140,7 @@ func TestStudentsTConditional(t *testing.T) {
|
|||||||
src := rand.New(rand.NewSource(1))
|
src := rand.New(rand.NewSource(1))
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
mean []float64
|
mean []float64
|
||||||
cov *mat64.SymDense
|
cov *mat.SymDense
|
||||||
nu float64
|
nu float64
|
||||||
|
|
||||||
idx []int
|
idx []int
|
||||||
@@ -149,7 +149,7 @@ func TestStudentsTConditional(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
mean: []float64{3, 4, -2},
|
mean: []float64{3, 4, -2},
|
||||||
cov: mat64.NewSymDense(3, []float64{5, 1.2, -0.8, 1.2, 6, 0.4, -0.8, 0.4, 2}),
|
cov: mat.NewSymDense(3, []float64{5, 1.2, -0.8, 1.2, 6, 0.4, -0.8, 0.4, 2}),
|
||||||
nu: 8,
|
nu: 8,
|
||||||
idx: []int{0},
|
idx: []int{0},
|
||||||
value: []float64{6},
|
value: []float64{6},
|
||||||
@@ -182,11 +182,11 @@ func TestStudentsTConditional(t *testing.T) {
|
|||||||
muOb[i] = test.mean[v]
|
muOb[i] = test.mean[v]
|
||||||
}
|
}
|
||||||
|
|
||||||
var sig11, sig22 mat64.SymDense
|
var sig11, sig22 mat.SymDense
|
||||||
sig11.SubsetSym(&s.sigma, unob)
|
sig11.SubsetSym(&s.sigma, unob)
|
||||||
sig22.SubsetSym(&s.sigma, ob)
|
sig22.SubsetSym(&s.sigma, ob)
|
||||||
|
|
||||||
sig12 := mat64.NewDense(len(unob), len(ob), nil)
|
sig12 := mat.NewDense(len(unob), len(ob), nil)
|
||||||
for i := range unob {
|
for i := range unob {
|
||||||
for j := range ob {
|
for j := range ob {
|
||||||
sig12.Set(i, j, s.sigma.At(unob[i], ob[j]))
|
sig12.Set(i, j, s.sigma.At(unob[i], ob[j]))
|
||||||
@@ -198,9 +198,9 @@ func TestStudentsTConditional(t *testing.T) {
|
|||||||
floats.Sub(shift, muOb)
|
floats.Sub(shift, muOb)
|
||||||
|
|
||||||
newMu := make([]float64, len(muUnob))
|
newMu := make([]float64, len(muUnob))
|
||||||
newMuVec := mat64.NewVector(len(muUnob), newMu)
|
newMuVec := mat.NewVector(len(muUnob), newMu)
|
||||||
shiftVec := mat64.NewVector(len(shift), shift)
|
shiftVec := mat.NewVector(len(shift), shift)
|
||||||
var tmp mat64.Vector
|
var tmp mat.Vector
|
||||||
tmp.SolveVec(&sig22, shiftVec)
|
tmp.SolveVec(&sig22, shiftVec)
|
||||||
newMuVec.MulVec(sig12, &tmp)
|
newMuVec.MulVec(sig12, &tmp)
|
||||||
floats.Add(newMu, muUnob)
|
floats.Add(newMu, muUnob)
|
||||||
@@ -209,16 +209,16 @@ func TestStudentsTConditional(t *testing.T) {
|
|||||||
t.Errorf("Mu mismatch. Got %v, want %v", sUp.mu, newMu)
|
t.Errorf("Mu mismatch. Got %v, want %v", sUp.mu, newMu)
|
||||||
}
|
}
|
||||||
|
|
||||||
var tmp2 mat64.Dense
|
var tmp2 mat.Dense
|
||||||
tmp2.Solve(&sig22, sig12.T())
|
tmp2.Solve(&sig22, sig12.T())
|
||||||
|
|
||||||
var tmp3 mat64.Dense
|
var tmp3 mat.Dense
|
||||||
tmp3.Mul(sig12, &tmp2)
|
tmp3.Mul(sig12, &tmp2)
|
||||||
tmp3.Sub(&sig11, &tmp3)
|
tmp3.Sub(&sig11, &tmp3)
|
||||||
|
|
||||||
dot := mat64.Dot(shiftVec, &tmp)
|
dot := mat.Dot(shiftVec, &tmp)
|
||||||
tmp3.Scale((test.nu+dot)/(test.nu+float64(len(ob))), &tmp3)
|
tmp3.Scale((test.nu+dot)/(test.nu+float64(len(ob))), &tmp3)
|
||||||
if !mat64.EqualApprox(&tmp3, &sUp.sigma, 1e-10) {
|
if !mat.EqualApprox(&tmp3, &sUp.sigma, 1e-10) {
|
||||||
t.Errorf("Sigma mismatch")
|
t.Errorf("Sigma mismatch")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -227,17 +227,17 @@ func TestStudentsTConditional(t *testing.T) {
|
|||||||
func TestStudentsTMarginalSingle(t *testing.T) {
|
func TestStudentsTMarginalSingle(t *testing.T) {
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
mu []float64
|
mu []float64
|
||||||
sigma *mat64.SymDense
|
sigma *mat.SymDense
|
||||||
nu float64
|
nu float64
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4},
|
mu: []float64{2, 3, 4},
|
||||||
sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
sigma: mat.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
|
||||||
nu: 5,
|
nu: 5,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mu: []float64{2, 3, 4, 5},
|
mu: []float64{2, 3, 4, 5},
|
||||||
sigma: mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
|
sigma: mat.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
|
||||||
nu: 6,
|
nu: 6,
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
|
@@ -9,8 +9,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"gonum.org/v1/gonum/floats"
|
"gonum.org/v1/gonum/floats"
|
||||||
"gonum.org/v1/gonum/matrix"
|
"gonum.org/v1/gonum/mat"
|
||||||
"gonum.org/v1/gonum/matrix/mat64"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// PC is a type for computing and extracting the principal components of a
|
// PC is a type for computing and extracting the principal components of a
|
||||||
@@ -19,7 +18,7 @@ import (
|
|||||||
type PC struct {
|
type PC struct {
|
||||||
n, d int
|
n, d int
|
||||||
weights []float64
|
weights []float64
|
||||||
svd *mat64.SVD
|
svd *mat.SVD
|
||||||
ok bool
|
ok bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -34,7 +33,7 @@ type PC struct {
|
|||||||
// must match the number of observations or PrincipalComponents will panic.
|
// must match the number of observations or PrincipalComponents will panic.
|
||||||
//
|
//
|
||||||
// PrincipalComponents returns whether the analysis was successful.
|
// PrincipalComponents returns whether the analysis was successful.
|
||||||
func (c *PC) PrincipalComponents(a mat64.Matrix, weights []float64) (ok bool) {
|
func (c *PC) PrincipalComponents(a mat.Matrix, weights []float64) (ok bool) {
|
||||||
c.n, c.d = a.Dims()
|
c.n, c.d = a.Dims()
|
||||||
if weights != nil && len(weights) != c.n {
|
if weights != nil && len(weights) != c.n {
|
||||||
panic("stat: len(weights) != observations")
|
panic("stat: len(weights) != observations")
|
||||||
@@ -51,15 +50,15 @@ func (c *PC) PrincipalComponents(a mat64.Matrix, weights []float64) (ok bool) {
|
|||||||
// analysis. The vectors are returned in the columns of a d×min(n, d) matrix.
|
// analysis. The vectors are returned in the columns of a d×min(n, d) matrix.
|
||||||
// If dst is not nil it must either be zero-sized or be a d×min(n, d) matrix.
|
// If dst is not nil it must either be zero-sized or be a d×min(n, d) matrix.
|
||||||
// dst will be used as the destination for the direction vector data. If dst
|
// dst will be used as the destination for the direction vector data. If dst
|
||||||
// is nil, a new mat64.Dense is allocated for the destination.
|
// is nil, a new mat.Dense is allocated for the destination.
|
||||||
func (c *PC) Vectors(dst *mat64.Dense) *mat64.Dense {
|
func (c *PC) Vectors(dst *mat.Dense) *mat.Dense {
|
||||||
if !c.ok {
|
if !c.ok {
|
||||||
panic("stat: use of unsuccessful principal components analysis")
|
panic("stat: use of unsuccessful principal components analysis")
|
||||||
}
|
}
|
||||||
|
|
||||||
if dst != nil {
|
if dst != nil {
|
||||||
if d, n := dst.Dims(); (n != 0 || d != 0) && (d != c.d || n != min(c.n, c.d)) {
|
if d, n := dst.Dims(); (n != 0 || d != 0) && (d != c.d || n != min(c.n, c.d)) {
|
||||||
panic(matrix.ErrShape)
|
panic(mat.ErrShape)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return c.svd.VTo(dst)
|
return c.svd.VTo(dst)
|
||||||
@@ -110,7 +109,7 @@ type CC struct {
|
|||||||
// xd and yd are used for size checks.
|
// xd and yd are used for size checks.
|
||||||
xd, yd int
|
xd, yd int
|
||||||
|
|
||||||
x, y, c *mat64.SVD
|
x, y, c *mat.SVD
|
||||||
ok bool
|
ok bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -162,7 +161,7 @@ type CC struct {
|
|||||||
// or in Chapter 3 of
|
// or in Chapter 3 of
|
||||||
// Koch, Inge. Analysis of multivariate and high-dimensional data.
|
// Koch, Inge. Analysis of multivariate and high-dimensional data.
|
||||||
// Vol. 32. Cambridge University Press, 2013. ISBN: 9780521887939
|
// Vol. 32. Cambridge University Press, 2013. ISBN: 9780521887939
|
||||||
func (c *CC) CanonicalCorrelations(x, y mat64.Matrix, weights []float64) error {
|
func (c *CC) CanonicalCorrelations(x, y mat.Matrix, weights []float64) error {
|
||||||
var yn int
|
var yn int
|
||||||
c.n, c.xd = x.Dims()
|
c.n, c.xd = x.Dims()
|
||||||
yn, c.yd = y.Dims()
|
yn, c.yd = y.Dims()
|
||||||
@@ -188,12 +187,12 @@ func (c *CC) CanonicalCorrelations(x, y mat64.Matrix, weights []float64) error {
|
|||||||
yv := c.y.VTo(nil)
|
yv := c.y.VTo(nil)
|
||||||
|
|
||||||
// Calculate and factorise the canonical correlation matrix.
|
// Calculate and factorise the canonical correlation matrix.
|
||||||
var ccor mat64.Dense
|
var ccor mat.Dense
|
||||||
ccor.Product(xv, xu.T(), yu, yv.T())
|
ccor.Product(xv, xu.T(), yu, yv.T())
|
||||||
if c.c == nil {
|
if c.c == nil {
|
||||||
c.c = &mat64.SVD{}
|
c.c = &mat.SVD{}
|
||||||
}
|
}
|
||||||
c.ok = c.c.Factorize(&ccor, matrix.SVDThin)
|
c.ok = c.c.Factorize(&ccor, mat.SVDThin)
|
||||||
if !c.ok {
|
if !c.ok {
|
||||||
return errors.New("stat: failed to factorize ccor")
|
return errors.New("stat: failed to factorize ccor")
|
||||||
}
|
}
|
||||||
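
Putting the pieces together, the canonical correlations are the singular values of the product assembled into ccor above. A hypothetical end-to-end sketch (not part of this commit), written against the calls visible in this diff with made-up data; it assumes the gonum.org/v1/gonum/mat and gonum.org/v1/gonum/stat imports, and that the Corrs accessor in the next hunk follows the package's usual nil-allocates convention for dst:

// ccSketch runs a canonical correlation analysis on two small matrices of
// observations and returns the correlations, largest first.
func ccSketch() ([]float64, error) {
	x := mat.NewDense(4, 2, []float64{
		1, 2,
		3, 5,
		4, 4,
		6, 7,
	})
	y := mat.NewDense(4, 2, []float64{
		1, 1,
		2, 3,
		3, 2,
		5, 6,
	})
	var cc stat.CC
	if err := cc.CanonicalCorrelations(x, y, nil); err != nil {
		return nil, err
	}
	return cc.Corrs(nil), nil
}
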
@@ -220,15 +219,15 @@ func (c *CC) Corrs(dst []float64) []float64 {
|
|||||||
// If dst is not nil it must either be zero-sized or be an xd×yd matrix where xd
|
// If dst is not nil it must either be zero-sized or be an xd×yd matrix where xd
|
||||||
// and yd are the number of variables in the input x and y matrices. dst will
|
// and yd are the number of variables in the input x and y matrices. dst will
|
||||||
// be used as the destination for the vector data. If dst is nil, a new
|
// be used as the destination for the vector data. If dst is nil, a new
|
||||||
// mat64.Dense is allocated for the destination.
|
// mat.Dense is allocated for the destination.
|
||||||
func (c *CC) Left(dst *mat64.Dense, spheredSpace bool) *mat64.Dense {
|
func (c *CC) Left(dst *mat.Dense, spheredSpace bool) *mat.Dense {
|
||||||
if !c.ok || c.n < 2 {
|
if !c.ok || c.n < 2 {
|
||||||
panic("stat: canonical correlations missing or invalid")
|
panic("stat: canonical correlations missing or invalid")
|
||||||
}
|
}
|
||||||
|
|
||||||
if dst != nil {
|
if dst != nil {
|
||||||
if d, n := dst.Dims(); (n != 0 || d != 0) && (n != c.yd || d != c.xd) {
|
if d, n := dst.Dims(); (n != 0 || d != 0) && (n != c.yd || d != c.xd) {
|
||||||
panic(matrix.ErrShape)
|
panic(mat.ErrShape)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
dst = c.c.UTo(dst)
|
dst = c.c.UTo(dst)
|
||||||
@@ -252,15 +251,15 @@ func (c *CC) Left(dst *mat64.Dense, spheredSpace bool) *mat64.Dense {
|
|||||||
// If dst is not nil it must either be zero-sized or be an yd×yd matrix where yd
|
// If dst is not nil it must either be zero-sized or be an yd×yd matrix where yd
|
||||||
// is the number of variables in the input y matrix. dst will
|
// is the number of variables in the input y matrix. dst will
|
||||||
// be used as the destination for the vector data. If dst is nil, a new
|
// be used as the destination for the vector data. If dst is nil, a new
|
||||||
// mat64.Dense is allocated for the destination.
|
// mat.Dense is allocated for the destination.
|
||||||
func (c *CC) Right(dst *mat64.Dense, spheredSpace bool) *mat64.Dense {
|
func (c *CC) Right(dst *mat.Dense, spheredSpace bool) *mat.Dense {
|
||||||
if !c.ok || c.n < 2 {
|
if !c.ok || c.n < 2 {
|
||||||
panic("stat: canonical correlations missing or invalid")
|
panic("stat: canonical correlations missing or invalid")
|
||||||
}
|
}
|
||||||
|
|
||||||
if dst != nil {
|
if dst != nil {
|
||||||
if d, n := dst.Dims(); (n != 0 || d != 0) && (n != c.yd || d != c.yd) {
|
if d, n := dst.Dims(); (n != 0 || d != 0) && (n != c.yd || d != c.yd) {
|
||||||
panic(matrix.ErrShape)
|
panic(mat.ErrShape)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
dst = c.c.VTo(dst)
|
dst = c.c.VTo(dst)
|
||||||
@@ -278,12 +277,12 @@ func (c *CC) Right(dst *mat64.Dense, spheredSpace bool) *mat64.Dense {
|
|||||||
return dst
|
return dst
|
||||||
}
|
}
|
||||||
|
|
||||||
func svdFactorizeCentered(work *mat64.SVD, m mat64.Matrix, weights []float64) (svd *mat64.SVD, ok bool) {
|
func svdFactorizeCentered(work *mat.SVD, m mat.Matrix, weights []float64) (svd *mat.SVD, ok bool) {
|
||||||
n, d := m.Dims()
|
n, d := m.Dims()
|
||||||
centered := mat64.NewDense(n, d, nil)
|
centered := mat.NewDense(n, d, nil)
|
||||||
col := make([]float64, n)
|
col := make([]float64, n)
|
||||||
for j := 0; j < d; j++ {
|
for j := 0; j < d; j++ {
|
||||||
mat64.Col(col, j, m)
|
mat.Col(col, j, m)
|
||||||
floats.AddConst(-Mean(col, weights), col)
|
floats.AddConst(-Mean(col, weights), col)
|
||||||
centered.SetCol(j, col)
|
centered.SetCol(j, col)
|
||||||
}
|
}
|
||||||
@@ -291,15 +290,15 @@ func svdFactorizeCentered(work *mat64.SVD, m mat64.Matrix, weights []float64) (s
|
|||||||
floats.Scale(math.Sqrt(w), centered.RawRowView(i))
|
floats.Scale(math.Sqrt(w), centered.RawRowView(i))
|
||||||
}
|
}
|
||||||
if work == nil {
|
if work == nil {
|
||||||
work = &mat64.SVD{}
|
work = &mat.SVD{}
|
||||||
}
|
}
|
||||||
ok = work.Factorize(centered, matrix.SVDThin)
|
ok = work.Factorize(centered, mat.SVDThin)
|
||||||
return work, ok
|
return work, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
// scaleColsReciSqrt scales the columns of cols
|
// scaleColsReciSqrt scales the columns of cols
|
||||||
// by the reciprocal square-root of vals.
|
// by the reciprocal square-root of vals.
|
||||||
func scaleColsReciSqrt(cols *mat64.Dense, vals []float64) {
|
func scaleColsReciSqrt(cols *mat.Dense, vals []float64) {
|
||||||
if cols == nil {
|
if cols == nil {
|
||||||
panic("stat: input nil")
|
panic("stat: input nil")
|
||||||
}
|
}
|
||||||
@@ -309,7 +308,7 @@ func scaleColsReciSqrt(cols *mat64.Dense, vals []float64) {
|
|||||||
}
|
}
|
||||||
col := make([]float64, n)
|
col := make([]float64, n)
|
||||||
for j := 0; j < d; j++ {
|
for j := 0; j < d; j++ {
|
||||||
mat64.Col(col, j, cols)
|
mat.Col(col, j, cols)
|
||||||
floats.Scale(math.Sqrt(1/vals[j]), col)
|
floats.Scale(math.Sqrt(1/vals[j]), col)
|
||||||
cols.SetCol(j, col)
|
cols.SetCol(j, col)
|
||||||
}
|
}
|
||||||
|
@@ -7,7 +7,7 @@ package stat_test
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"gonum.org/v1/gonum/matrix/mat64"
|
"gonum.org/v1/gonum/mat"
|
||||||
"gonum.org/v1/gonum/stat"
|
"gonum.org/v1/gonum/stat"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -15,7 +15,7 @@ func ExamplePrincipalComponents() {
|
|||||||
// iris is a truncated sample of the Fisher's Iris dataset.
|
// iris is a truncated sample of the Fisher's Iris dataset.
|
||||||
n := 10
|
n := 10
|
||||||
d := 4
|
d := 4
|
||||||
iris := mat64.NewDense(n, d, []float64{
|
iris := mat.NewDense(n, d, []float64{
|
||||||
5.1, 3.5, 1.4, 0.2,
|
5.1, 3.5, 1.4, 0.2,
|
||||||
4.9, 3.0, 1.4, 0.2,
|
4.9, 3.0, 1.4, 0.2,
|
||||||
4.7, 3.2, 1.3, 0.2,
|
4.7, 3.2, 1.3, 0.2,
|
||||||
@@ -39,10 +39,10 @@ func ExamplePrincipalComponents() {
|
|||||||
|
|
||||||
// Project the data onto the first 2 principal components.
|
// Project the data onto the first 2 principal components.
|
||||||
k := 2
|
k := 2
|
||||||
var proj mat64.Dense
|
var proj mat.Dense
|
||||||
proj.Mul(iris, pc.Vectors(nil).Slice(0, d, 0, k))
|
proj.Mul(iris, pc.Vectors(nil).Slice(0, d, 0, k))
|
||||||
|
|
||||||
fmt.Printf("proj = %.4f", mat64.Formatted(&proj, mat64.Prefix(" ")))
|
fmt.Printf("proj = %.4f", mat.Formatted(&proj, mat.Prefix(" ")))
|
||||||
|
|
||||||
// Output:
|
// Output:
|
||||||
// variances = [0.1666 0.0207 0.0079 0.0019]
|
// variances = [0.1666 0.0207 0.0079 0.0019]
|
||||||
|
@@ -8,7 +8,7 @@ import (
 "testing"

 "gonum.org/v1/gonum/floats"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 var appengine bool
@@ -19,20 +19,20 @@ func TestPrincipalComponents(t *testing.T) {
 }
 tests:
 for i, test := range []struct {
-data mat64.Matrix
+data mat.Matrix
 weights []float64
-wantVecs *mat64.Dense
+wantVecs *mat.Dense
 wantVars []float64
 epsilon float64
 }{
 // Test results verified using R.
 {
-data: mat64.NewDense(3, 3, []float64{
+data: mat.NewDense(3, 3, []float64{
 1, 2, 3,
 4, 5, 6,
 7, 8, 9,
 }),
-wantVecs: mat64.NewDense(3, 3, []float64{
+wantVecs: mat.NewDense(3, 3, []float64{
 0.5773502691896258, 0.8164965809277261, 0,
 0.577350269189626, -0.4082482904638632, -0.7071067811865476,
 0.5773502691896258, -0.4082482904638631, 0.7071067811865475,
@@ -41,7 +41,7 @@ tests:
 epsilon: 1e-12,
 },
 { // Truncated iris data.
-data: mat64.NewDense(10, 4, []float64{
+data: mat.NewDense(10, 4, []float64{
 5.1, 3.5, 1.4, 0.2,
 4.9, 3.0, 1.4, 0.2,
 4.7, 3.2, 1.3, 0.2,
@@ -53,7 +53,7 @@ tests:
 4.4, 2.9, 1.4, 0.2,
 4.9, 3.1, 1.5, 0.1,
 }),
-wantVecs: mat64.NewDense(4, 4, []float64{
+wantVecs: mat.NewDense(4, 4, []float64{
 -0.6681110197952722, 0.7064764857539533, -0.14026590216895132, -0.18666578956412125,
 -0.7166344774801547, -0.6427036135482664, -0.135650285905254, 0.23444848208629923,
 -0.164411275166307, 0.11898477441068218, 0.9136367900709548, 0.35224901970831746,
@@ -63,12 +63,12 @@ tests:
 epsilon: 1e-12,
 },
 { // Truncated iris data to form wide matrix.
-data: mat64.NewDense(3, 4, []float64{
+data: mat.NewDense(3, 4, []float64{
 5.1, 3.5, 1.4, 0.2,
 4.9, 3.0, 1.4, 0.2,
 4.7, 3.2, 1.3, 0.2,
 }),
-wantVecs: mat64.NewDense(4, 3, []float64{
+wantVecs: mat.NewDense(4, 3, []float64{
 -0.5705187254552365, -0.7505979435049239, 0.08084520834544455,
 -0.8166537769529318, 0.5615147645527523, -0.032338083338177705,
 -0.08709186238359454, -0.3482870890450082, -0.22636658336724505,
@@ -78,7 +78,7 @@ tests:
 epsilon: 1e-12,
 },
 { // Truncated iris data transposed to check for operation on fat input.
-data: mat64.NewDense(10, 4, []float64{
+data: mat.NewDense(10, 4, []float64{
 5.1, 3.5, 1.4, 0.2,
 4.9, 3.0, 1.4, 0.2,
 4.7, 3.2, 1.3, 0.2,
@@ -90,7 +90,7 @@ tests:
 4.4, 2.9, 1.4, 0.2,
 4.9, 3.1, 1.5, 0.1,
 }).T(),
-wantVecs: mat64.NewDense(10, 4, []float64{
+wantVecs: mat.NewDense(10, 4, []float64{
 -0.3366602459946619, -0.1373634006401213, 0.3465102523547623, -0.10290179303893479,
 -0.31381852053861975, 0.5197145790632827, 0.5567296129086686, -0.15923062170153618,
 -0.30857197637565165, -0.07670930360819002, 0.36159923003337235, 0.3342301027853355,
@@ -106,7 +106,7 @@ tests:
 epsilon: 1e-12,
 },
 { // Truncated iris data unitary weights.
-data: mat64.NewDense(10, 4, []float64{
+data: mat.NewDense(10, 4, []float64{
 5.1, 3.5, 1.4, 0.2,
 4.9, 3.0, 1.4, 0.2,
 4.7, 3.2, 1.3, 0.2,
@@ -119,7 +119,7 @@ tests:
 4.9, 3.1, 1.5, 0.1,
 }),
 weights: []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
-wantVecs: mat64.NewDense(4, 4, []float64{
+wantVecs: mat.NewDense(4, 4, []float64{
 -0.6681110197952722, 0.7064764857539533, -0.14026590216895132, -0.18666578956412125,
 -0.7166344774801547, -0.6427036135482664, -0.135650285905254, 0.23444848208629923,
 -0.164411275166307, 0.11898477441068218, 0.9136367900709548, 0.35224901970831746,
@@ -129,7 +129,7 @@ tests:
 epsilon: 1e-12,
 },
 { // Truncated iris data non-unitary weights.
-data: mat64.NewDense(10, 4, []float64{
+data: mat.NewDense(10, 4, []float64{
 5.1, 3.5, 1.4, 0.2,
 4.9, 3.0, 1.4, 0.2,
 4.7, 3.2, 1.3, 0.2,
@@ -142,7 +142,7 @@ tests:
 4.9, 3.1, 1.5, 0.1,
 }),
 weights: []float64{2, 3, 1, 1, 1, 1, 1, 1, 1, 2},
-wantVecs: mat64.NewDense(4, 4, []float64{
+wantVecs: mat.NewDense(4, 4, []float64{
 -0.618936145422414, 0.763069301531647, 0.124857741232537, 0.138035623677211,
 -0.763958271606519, -0.603881770702898, 0.118267155321333, -0.194184052457746,
 -0.143552119754944, 0.090014599564871, -0.942209377020044, -0.289018426115945,
@@ -153,7 +153,7 @@ tests:
 },
 } {
 var pc PC
-var vecs *mat64.Dense
+var vecs *mat.Dense
 var vars []float64
 for j := 0; j < 2; j++ {
 ok := pc.PrincipalComponents(test.data, test.weights)
@@ -163,9 +163,9 @@ tests:
 t.Errorf("unexpected SVD failure for test %d use %d", i, j)
 continue tests
 }
-if !mat64.EqualApprox(vecs, test.wantVecs, test.epsilon) {
+if !mat.EqualApprox(vecs, test.wantVecs, test.epsilon) {
 t.Errorf("%d use %d: unexpected PCA result got:\n%v\nwant:\n%v",
-i, j, mat64.Formatted(vecs), mat64.Formatted(test.wantVecs))
+i, j, mat.Formatted(vecs), mat.Formatted(test.wantVecs))
 }
 if !approxEqual(vars, test.wantVars, test.epsilon) {
 t.Errorf("%d use %d: unexpected variance result got:%v, want:%v",
@@ -8,7 +8,7 @@ import (
 "math"
 "math/rand"

-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 "gonum.org/v1/gonum/stat/distmv"
 )

@@ -57,7 +57,7 @@ type MetropolisHastingser struct {
 //
 // The number of columns in batch must equal len(m.Initial), otherwise Sample
 // will panic.
-func (m MetropolisHastingser) Sample(batch *mat64.Dense) {
+func (m MetropolisHastingser) Sample(batch *mat.Dense) {
 burnIn := m.BurnIn
 rate := m.Rate
 if rate == 0 {
@@ -74,7 +74,7 @@ func (m MetropolisHastingser) Sample(batch *mat64.Dense) {
 // during the rate portion.
 tmp := batch
 if rate > r {
-tmp = mat64.NewDense(rate, c, nil)
+tmp = mat.NewDense(rate, c, nil)
 }
 rTmp, _ := tmp.Dims()

@@ -84,7 +84,7 @@ func (m MetropolisHastingser) Sample(batch *mat64.Dense) {
 copy(initial, m.Initial)
 for remaining != 0 {
 newSamp := min(rTmp, remaining)
-MetropolisHastings(tmp.View(0, 0, newSamp, c).(*mat64.Dense), initial, m.Target, m.Proposal, m.Src)
+MetropolisHastings(tmp.View(0, 0, newSamp, c).(*mat.Dense), initial, m.Target, m.Proposal, m.Src)
 copy(initial, tmp.RawRowView(newSamp-1))
 remaining -= newSamp
 }
@@ -95,11 +95,11 @@ func (m MetropolisHastingser) Sample(batch *mat64.Dense) {
 }

 if rTmp <= r {
-tmp = mat64.NewDense(rate, c, nil)
+tmp = mat.NewDense(rate, c, nil)
 }

 // Take a single sample from the chain.
-MetropolisHastings(batch.View(0, 0, 1, c).(*mat64.Dense), initial, m.Target, m.Proposal, m.Src)
+MetropolisHastings(batch.View(0, 0, 1, c).(*mat.Dense), initial, m.Target, m.Proposal, m.Src)

 copy(initial, batch.RawRowView(0))
 // For all of the other samples, first generate Rate samples and then actually
@@ -139,7 +139,7 @@ func (m MetropolisHastingser) Sample(batch *mat64.Dense) {
 // are ignored in between each kept sample. This helps decorrelate
 // the samples from one another, but also reduces the number of available samples.
 // A sampling rate can be implemented with successive calls to MetropolisHastings.
-func MetropolisHastings(batch *mat64.Dense, initial []float64, target distmv.LogProber, proposal MHProposal, src *rand.Rand) {
+func MetropolisHastings(batch *mat.Dense, initial []float64, target distmv.LogProber, proposal MHProposal, src *rand.Rand) {
 f64 := rand.Float64
 if src != nil {
 f64 = src.Float64
@@ -180,7 +180,7 @@ type ProposalNormal struct {
 // and the mean of the distribution changes.
 //
 // NewProposalNormal returns {nil, false} if the covariance matrix is not positive-definite.
-func NewProposalNormal(sigma *mat64.SymDense, src *rand.Rand) (*ProposalNormal, bool) {
+func NewProposalNormal(sigma *mat.SymDense, src *rand.Rand) (*ProposalNormal, bool) {
 mu := make([]float64, sigma.Symmetric())
 normal, ok := distmv.NewNormal(mu, sigma, src)
 if !ok {
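For users tracking the rename, driving the Metropolis-Hastings sampler is unchanged apart from the batch type, which is now *mat.Dense. The sketch below is illustrative only: it assumes these files live in the stat/samplemv package (the import path is not shown in this diff), uses distmv.NewNormal and NewProposalNormal as documented above, and picks arbitrary dimensions and tuning values.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
	"gonum.org/v1/gonum/stat/distmv"
	"gonum.org/v1/gonum/stat/samplemv" // assumed import path for the package in this diff
)

func main() {
	const dim = 2

	// Target: a standard normal in two dimensions.
	target, ok := distmv.NewNormal(make([]float64, dim), identity(dim), nil)
	if !ok {
		panic("target covariance not positive definite")
	}

	// Proposal: a normal random walk with a small covariance.
	sigmaProp := mat.NewSymDense(dim, nil)
	for i := 0; i < dim; i++ {
		sigmaProp.SetSym(i, i, 0.25)
	}
	proposal, ok := samplemv.NewProposalNormal(sigmaProp, nil)
	if !ok {
		panic("proposal covariance not positive definite")
	}

	mh := samplemv.MetropolisHastingser{
		Initial:  make([]float64, dim),
		Target:   target,
		Proposal: proposal,
		BurnIn:   1000, // samples discarded before any are kept
		Rate:     10,   // keep every 10th sample to reduce correlation
	}

	batch := mat.NewDense(500, dim, nil) // 500 kept samples, one per row
	mh.Sample(batch)

	fmt.Println(mat.Formatted(batch.View(0, 0, 5, dim))) // first few kept samples
}

// identity returns an n-by-n identity matrix as a SymDense.
func identity(n int) *mat.SymDense {
	s := mat.NewSymDense(n, nil)
	for i := 0; i < n; i++ {
		s.SetSym(i, i, 1)
	}
	return s
}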
@@ -10,7 +10,7 @@ import (
 "testing"

 "gonum.org/v1/gonum/floats"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 "gonum.org/v1/gonum/stat"
 "gonum.org/v1/gonum/stat/distmv"
 )
@@ -28,7 +28,7 @@ func TestLatinHypercube(t *testing.T) {
 distmv.NewUniform([]distmv.Bound{{0, 3}, {-1, 5}, {-4, -1}}, nil),
 } {
 dim := dist.Dim()
-batch := mat64.NewDense(nSamples, dim, nil)
+batch := mat.NewDense(nSamples, dim, nil)
 LatinHypercube(batch, dist, nil)
 // Latin hypercube should have one entry per hyperrow.
 present := make([][]bool, nSamples)
@@ -68,7 +68,7 @@ func TestImportance(t *testing.T) {
 }

 muImp := make([]float64, dim)
-sigmaImp := mat64.NewSymDense(dim, nil)
+sigmaImp := mat.NewSymDense(dim, nil)
 for i := 0; i < dim; i++ {
 sigmaImp.SetSym(i, i, 3)
 }
@@ -78,7 +78,7 @@ func TestImportance(t *testing.T) {
 }

 nSamples := 100000
-batch := mat64.NewDense(nSamples, dim, nil)
+batch := mat.NewDense(nSamples, dim, nil)
 weights := make([]float64, nSamples)
 Importance(batch, weights, target, proposal)

@@ -102,7 +102,7 @@ func TestRejection(t *testing.T) {
 mu := target.Mean(nil)

 muImp := make([]float64, dim)
-sigmaImp := mat64.NewSymDense(dim, nil)
+sigmaImp := mat.NewSymDense(dim, nil)
 for i := 0; i < dim; i++ {
 sigmaImp.SetSym(i, i, 6)
 }
@@ -112,7 +112,7 @@ func TestRejection(t *testing.T) {
 }

 nSamples := 1000
-batch := mat64.NewDense(nSamples, dim, nil)
+batch := mat.NewDense(nSamples, dim, nil)
 weights := make([]float64, nSamples)
 _, ok = Rejection(batch, target, proposal, 1000, nil)
 if !ok {
@@ -120,7 +120,7 @@ func TestRejection(t *testing.T) {
 }

 for i := 0; i < dim; i++ {
-col := mat64.Col(nil, i, batch)
+col := mat.Col(nil, i, batch)
 ev := stat.Mean(col, weights)
 if math.Abs(ev-mu[i]) > 1e-2 {
 t.Errorf("Mean mismatch: Want %v, got %v", mu[i], ev)
@@ -136,7 +136,7 @@ func TestMetropolisHastings(t *testing.T) {
 t.Fatal("bad test, sigma not pos def")
 }

-sigmaImp := mat64.NewSymDense(dim, nil)
+sigmaImp := mat.NewSymDense(dim, nil)
 for i := 0; i < dim; i++ {
 sigmaImp.SetSym(i, i, 0.25)
 }
@@ -147,10 +147,10 @@ func TestMetropolisHastings(t *testing.T) {

 nSamples := 1000000
 burnin := 5000
-batch := mat64.NewDense(nSamples, dim, nil)
+batch := mat.NewDense(nSamples, dim, nil)
 initial := make([]float64, dim)
 MetropolisHastings(batch, initial, target, proposal, nil)
-batch = batch.View(burnin, 0, nSamples-burnin, dim).(*mat64.Dense)
+batch = batch.View(burnin, 0, nSamples-burnin, dim).(*mat.Dense)

 compareNormal(t, target, batch, nil)
 }
@@ -161,8 +161,8 @@ func randomNormal(dim int) (*distmv.Normal, bool) {
 for i := range data {
 data[i] = rand.Float64()
 }
-a := mat64.NewDense(dim, dim, data)
-var sigma mat64.SymDense
+a := mat.NewDense(dim, dim, data)
+var sigma mat.SymDense
 sigma.SymOuterK(1, a)
 mu := make([]float64, dim)
 for i := range mu {
@@ -171,7 +171,7 @@ func randomNormal(dim int) (*distmv.Normal, bool) {
 return distmv.NewNormal(mu, &sigma, nil)
 }

-func compareNormal(t *testing.T, want *distmv.Normal, batch *mat64.Dense, weights []float64) {
+func compareNormal(t *testing.T, want *distmv.Normal, batch *mat.Dense, weights []float64) {
 dim := want.Dim()
 mu := want.Mean(nil)
 sigma := want.CovarianceMatrix(nil)
@@ -183,7 +183,7 @@ func compareNormal(t *testing.T, want *distmv.Normal, batch *mat64.Dense, weight
 }
 }
 for i := 0; i < dim; i++ {
-col := mat64.Col(nil, i, batch)
+col := mat.Col(nil, i, batch)
 ev := stat.Mean(col, weights)
 if math.Abs(ev-mu[i]) > 1e-2 {
 t.Errorf("Mean mismatch: Want %v, got %v", mu[i], ev)
@@ -191,7 +191,7 @@ func compareNormal(t *testing.T, want *distmv.Normal, batch *mat64.Dense, weight
 }

 cov := stat.CovarianceMatrix(nil, batch, weights)
-if !mat64.EqualApprox(cov, sigma, 1.5e-1) {
+if !mat.EqualApprox(cov, sigma, 1.5e-1) {
 t.Errorf("Covariance matrix mismatch")
 }
 }
@@ -222,7 +222,7 @@ func TestMetropolisHastingser(t *testing.T) {
 t.Fatal("bad test, sigma not pos def")
 }

-sigmaImp := mat64.NewSymDense(dim, nil)
+sigmaImp := mat.NewSymDense(dim, nil)
 for i := 0; i < dim; i++ {
 sigmaImp.SetSym(i, i, 0.25)
 }
@@ -245,7 +245,7 @@ func TestMetropolisHastingser(t *testing.T) {
 samples := test.samples
 burnin := test.burnin
 rate := test.rate
-fullBatch := mat64.NewDense(1+burnin+rate*(samples-1), dim, nil)
+fullBatch := mat.NewDense(1+burnin+rate*(samples-1), dim, nil)
 mh.Sample(fullBatch)
 mh = MetropolisHastingser{
 Initial: initial,
@@ -256,7 +256,7 @@ func TestMetropolisHastingser(t *testing.T) {
 Rate: rate,
 }
 rand.Seed(int64(seed))
-batch := mat64.NewDense(samples, dim, nil)
+batch := mat.NewDense(samples, dim, nil)
 mh.Sample(batch)

 same := true
@@ -271,8 +271,8 @@ func TestMetropolisHastingser(t *testing.T) {
 }

 if !same {
-fmt.Printf("%v\n", mat64.Formatted(batch))
-fmt.Printf("%v\n", mat64.Formatted(fullBatch))
+fmt.Printf("%v\n", mat.Formatted(batch))
+fmt.Printf("%v\n", mat.Formatted(fullBatch))

 t.Errorf("sampling mismatch: dim = %v, burnin = %v, rate = %v, samples = %v", dim, burnin, rate, samples)
 }
@@ -15,7 +15,7 @@ import (
 "math"
 "math/rand"

-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 "gonum.org/v1/gonum/stat/distmv"
 )

@@ -43,7 +43,7 @@ func min(a, b int) int {
 // implementing type. The number of samples generated is equal to rows(batch),
 // and the samples are stored in-place into the input.
 type Sampler interface {
-Sample(batch *mat64.Dense)
+Sample(batch *mat.Dense)
 }

 // WeightedSampler generates a batch of samples and their relative weights
@@ -52,7 +52,7 @@ type Sampler interface {
 // are stored in-place into the inputs. The length of weights must equal
 // rows(batch), otherwise SampleWeighted will panic.
 type WeightedSampler interface {
-SampleWeighted(batch *mat64.Dense, weights []float64)
+SampleWeighted(batch *mat.Dense, weights []float64)
 }

 // SampleUniformWeighted wraps a Sampler type to create a WeightedSampler where all
@@ -64,7 +64,7 @@ type SampleUniformWeighted struct {
 // SampleWeighted generates rows(batch) samples from the embedded Sampler type
 // and sets all of the weights equal to 1. If rows(batch) and len(weights)
 // of weights are not equal, SampleWeighted will panic.
-func (w SampleUniformWeighted) SampleWeighted(batch *mat64.Dense, weights []float64) {
+func (w SampleUniformWeighted) SampleWeighted(batch *mat.Dense, weights []float64) {
 r, _ := batch.Dims()
 if r != len(weights) {
 panic(badLengthMismatch)
@@ -84,7 +84,7 @@ type LatinHypercuber struct {

 // Sample generates rows(batch) samples using the LatinHypercube generation
 // procedure.
-func (l LatinHypercuber) Sample(batch *mat64.Dense) {
+func (l LatinHypercuber) Sample(batch *mat.Dense) {
 LatinHypercube(batch, l.Q, l.Src)
 }

@@ -96,7 +96,7 @@ func (l LatinHypercuber) Sample(batch *mat64.Dense) {
 // spaced bins and guarantees that one sample is generated per bin. Within each bin,
 // the location is randomly sampled. The distmv.NewUnitUniform function can be used
 // for easy sampling from the unit hypercube.
-func LatinHypercube(batch *mat64.Dense, q distmv.Quantiler, src *rand.Rand) {
+func LatinHypercube(batch *mat.Dense, q distmv.Quantiler, src *rand.Rand) {
 r, c := batch.Dims()
 var f64 func() float64
 var perm func(int) []int
@@ -132,7 +132,7 @@ type Importancer struct {

 // SampleWeighted generates rows(batch) samples using the Importance sampling
 // generation procedure.
-func (l Importancer) SampleWeighted(batch *mat64.Dense, weights []float64) {
+func (l Importancer) SampleWeighted(batch *mat.Dense, weights []float64) {
 Importance(batch, weights, l.Target, l.Proposal)
 }

@@ -150,7 +150,7 @@ func (l Importancer) SampleWeighted(batch *mat64.Dense, weights []float64) {
 //
 // If weights is nil, the weights are not stored. The length of weights must equal
 // the length of batch, otherwise Importance will panic.
-func Importance(batch *mat64.Dense, weights []float64, target distmv.LogProber, proposal distmv.RandLogProber) {
+func Importance(batch *mat.Dense, weights []float64, target distmv.LogProber, proposal distmv.RandLogProber) {
 r, _ := batch.Dims()
 if r != len(weights) {
 panic(badLengthMismatch)
@@ -194,7 +194,7 @@ func (r *Rejectioner) Proposed() int {
 // Rejection sampling may fail if the constant is insufficiently high, as described
 // in the function comment for Rejection. If the generation fails, the samples
 // are set to math.NaN(), and a call to Err will return a non-nil value.
-func (r *Rejectioner) Sample(batch *mat64.Dense) {
+func (r *Rejectioner) Sample(batch *mat.Dense) {
 r.err = nil
 r.proposed = 0
 proposed, ok := Rejection(batch, r.Target, r.Proposal, r.C, r.Src)
@@ -225,7 +225,7 @@ func (r *Rejectioner) Sample(batch *mat64.Dense) {
 // a value that is proportional to the probability (logprob + constant). This is
 // useful for cases where the probability distribution is only known up to a normalization
 // constant.
-func Rejection(batch *mat64.Dense, target distmv.LogProber, proposal distmv.RandLogProber, c float64, src *rand.Rand) (nProposed int, ok bool) {
+func Rejection(batch *mat.Dense, target distmv.LogProber, proposal distmv.RandLogProber, c float64, src *rand.Rand) (nProposed int, ok bool) {
 if c < 1 {
 panic("rejection: acceptance constant must be greater than 1")
 }
@@ -268,13 +268,13 @@ type IIDer struct {
 }

 // Sample generates a set of identically and independently distributed samples.
-func (iid IIDer) Sample(batch *mat64.Dense) {
+func (iid IIDer) Sample(batch *mat.Dense) {
 IID(batch, iid.Dist)
 }

 // IID generates a set of independently and identically distributed samples from
 // the input distribution.
-func IID(batch *mat64.Dense, d distmv.Rander) {
+func IID(batch *mat.Dense, d distmv.Rander) {
 r, _ := batch.Dims()
 for i := 0; i < r; i++ {
 d.Rand(batch.RawRowView(i))
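The Sampler and WeightedSampler interfaces above all fill a *mat.Dense with one sample per row. A minimal sketch of LatinHypercube and IIDer usage follows; it assumes the stat/samplemv import path (not shown in this diff) and that *distmv.Uniform satisfies both distmv.Quantiler and distmv.Rander, as the tests and doc comments above suggest.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
	"gonum.org/v1/gonum/stat/distmv"
	"gonum.org/v1/gonum/stat/samplemv" // assumed import path for the package in this diff
)

func main() {
	// A uniform distribution over a 3-dimensional box, as in the tests above.
	dist := distmv.NewUniform([]distmv.Bound{{0, 3}, {-1, 5}, {-4, -1}}, nil)

	// One row per sample, one column per dimension.
	batch := mat.NewDense(100, dist.Dim(), nil)

	// Stratified (Latin hypercube) sampling: one sample per equal-probability
	// bin along every dimension.
	samplemv.LatinHypercube(batch, dist, nil)

	// The same batch can be refilled by any Sampler; IIDer draws independent rows.
	// (Assumes *distmv.Uniform implements distmv.Rander.)
	var s samplemv.Sampler = samplemv.IIDer{Dist: dist}
	s.Sample(batch)

	fmt.Println(mat.Formatted(batch.View(0, 0, 3, dist.Dim())))
}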
@@ -8,8 +8,7 @@ import (
 "math"

 "gonum.org/v1/gonum/floats"
-"gonum.org/v1/gonum/matrix"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 // CovarianceMatrix returns the covariance matrix (also known as the
@@ -21,9 +20,9 @@ import (
 // must not contain negative elements.
 // If cov is not nil it must either be zero-sized or have the same number of
 // columns as the input data matrix. cov will be used as the destination for
-// the covariance data. If cov is nil, a new mat64.SymDense is allocated for
+// the covariance data. If cov is nil, a new mat.SymDense is allocated for
 // the destination.
-func CovarianceMatrix(cov *mat64.SymDense, x mat64.Matrix, weights []float64) *mat64.SymDense {
+func CovarianceMatrix(cov *mat.SymDense, x mat.Matrix, weights []float64) *mat.SymDense {
 // This is the matrix version of the two-pass algorithm. It doesn't use the
 // additional floating point error correction that the Covariance function uses
 // to reduce the impact of rounding during centering.
@@ -31,12 +30,12 @@ func CovarianceMatrix(cov *mat64.SymDense, x mat64.Matrix, weights []float64) *m
 r, c := x.Dims()

 if cov == nil {
-cov = mat64.NewSymDense(c, nil)
+cov = mat.NewSymDense(c, nil)
 } else if n := cov.Symmetric(); n != c && n != 0 {
-panic(matrix.ErrShape)
+panic(mat.ErrShape)
 }

-var xt mat64.Dense
+var xt mat.Dense
 xt.Clone(x.T())
 // Subtract the mean of each of the columns.
 for i := 0; i < c; i++ {
@@ -82,9 +81,9 @@ func CovarianceMatrix(cov *mat64.SymDense, x mat64.Matrix, weights []float64) *m
 // must not contain negative elements.
 // If corr is not nil it must either be zero-sized or have the same number of
 // columns as the input data matrix. corr will be used as the destination for
-// the correlation data. If corr is nil, a new mat64.SymDense is allocated for
+// the correlation data. If corr is nil, a new mat.SymDense is allocated for
 // the destination.
-func CorrelationMatrix(corr *mat64.SymDense, x mat64.Matrix, weights []float64) *mat64.SymDense {
+func CorrelationMatrix(corr *mat.SymDense, x mat.Matrix, weights []float64) *mat.SymDense {
 // This will panic if the sizes don't match, or if weights is the wrong size.
 corr = CovarianceMatrix(corr, x, weights)
 covToCorr(corr)
@@ -92,7 +91,7 @@ func CorrelationMatrix(corr *mat64.SymDense, x mat64.Matrix, weights []float64)
 }

 // covToCorr converts a covariance matrix to a correlation matrix.
-func covToCorr(c *mat64.SymDense) {
+func covToCorr(c *mat.SymDense) {
 r := c.Symmetric()

 s := make([]float64, r)
@@ -113,11 +112,11 @@ func covToCorr(c *mat64.SymDense) {
 // The input sigma should be vector of standard deviations corresponding
 // to the covariance. It will panic if len(sigma) is not equal to the
 // number of rows in the correlation matrix.
-func corrToCov(c *mat64.SymDense, sigma []float64) {
+func corrToCov(c *mat.SymDense, sigma []float64) {
 r, _ := c.Dims()

 if r != len(sigma) {
-panic(matrix.ErrShape)
+panic(mat.ErrShape)
 }
 for i, sx := range sigma {
 // Ensure that the diagonal has exactly sigma squared.
@@ -135,13 +134,13 @@ func corrToCov(c *mat64.SymDense, sigma []float64) {
 // Mahalanobis returns NaN if the linear solve fails.
 //
 // See https://en.wikipedia.org/wiki/Mahalanobis_distance for more information.
-func Mahalanobis(x, y *mat64.Vector, chol *mat64.Cholesky) float64 {
+func Mahalanobis(x, y *mat.Vector, chol *mat.Cholesky) float64 {
-var diff mat64.Vector
+var diff mat.Vector
 diff.SubVec(x, y)
-var tmp mat64.Vector
+var tmp mat.Vector
 err := tmp.SolveCholeskyVec(chol, &diff)
 if err != nil {
 return math.NaN()
 }
-return math.Sqrt(mat64.Dot(&tmp, &diff))
+return math.Sqrt(mat.Dot(&tmp, &diff))
 }
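CovarianceMatrix, CorrelationMatrix and Mahalanobis now accept and return mat types directly. The sketch below strings together only the calls visible in this file and its tests; the data values are arbitrary, and passing nil as the destination lets the functions allocate a fresh mat.SymDense as documented above.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
	"gonum.org/v1/gonum/stat"
)

func main() {
	// Five observations of two variables, one observation per row (arbitrary values).
	x := mat.NewDense(5, 2, []float64{
		1, 2,
		2, 1,
		3, 5,
		4, 3,
		5, 6,
	})

	// Passing nil lets each function allocate its own destination SymDense.
	cov := stat.CovarianceMatrix(nil, x, nil)
	corr := stat.CorrelationMatrix(nil, x, nil)
	fmt.Printf("cov = %v\ncorr = %v\n", mat.Formatted(cov), mat.Formatted(corr))

	// Mahalanobis distance between two points under this covariance.
	var chol mat.Cholesky
	if !chol.Factorize(cov) {
		panic("covariance is not positive definite")
	}
	a := mat.NewVector(2, []float64{1, 2})
	b := mat.NewVector(2, []float64{0, 0})
	fmt.Println(stat.Mahalanobis(a, b, &chol))
}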
@@ -10,19 +10,19 @@ import (
 "testing"

 "gonum.org/v1/gonum/floats"
-"gonum.org/v1/gonum/matrix/mat64"
+"gonum.org/v1/gonum/mat"
 )

 func TestCovarianceMatrix(t *testing.T) {
 // An alternative way to test this is to call the Variance and
 // Covariance functions and ensure that the results are identical.
 for i, test := range []struct {
-data *mat64.Dense
+data *mat.Dense
 weights []float64
-ans *mat64.Dense
+ans *mat.Dense
 }{
 {
-data: mat64.NewDense(5, 2, []float64{
+data: mat.NewDense(5, 2, []float64{
 -2, -4,
 -1, 2,
 0, 0,
@@ -30,12 +30,12 @@ func TestCovarianceMatrix(t *testing.T) {
 2, 4,
 }),
 weights: nil,
-ans: mat64.NewDense(2, 2, []float64{
+ans: mat.NewDense(2, 2, []float64{
 2.5, 3,
 3, 10,
 }),
 }, {
-data: mat64.NewDense(3, 2, []float64{
+data: mat.NewDense(3, 2, []float64{
 1, 1,
 2, 4,
 3, 9,
@@ -45,7 +45,7 @@ func TestCovarianceMatrix(t *testing.T) {
 1.5,
 1,
 },
-ans: mat64.NewDense(2, 2, []float64{
+ans: mat.NewDense(2, 2, []float64{
 .8, 3.2,
 3.2, 13.142857142857146,
 }),
@@ -60,9 +60,9 @@ func TestCovarianceMatrix(t *testing.T) {
 if test.weights != nil {
 copy(w, test.weights)
 }
-for _, cov := range []*mat64.SymDense{nil, {}} {
+for _, cov := range []*mat.SymDense{nil, {}} {
 c := CovarianceMatrix(cov, test.data, test.weights)
-if !mat64.Equal(c, test.ans) {
+if !mat.Equal(c, test.ans) {
 t.Errorf("%d: expected cov %v, found %v", i, test.ans, c)
 }
 if !floats.Equal(d, r.Data) {
@@ -76,8 +76,8 @@ func TestCovarianceMatrix(t *testing.T) {
 _, cols := c.Dims()
 for ci := 0; ci < cols; ci++ {
 for cj := 0; cj < cols; cj++ {
-x := mat64.Col(nil, ci, test.data)
-y := mat64.Col(nil, cj, test.data)
+x := mat.Col(nil, ci, test.data)
+y := mat.Col(nil, cj, test.data)
 cov := Covariance(x, y, test.weights)
 if math.Abs(cov-c.At(ci, cj)) > 1e-14 {
 t.Errorf("CovMat does not match at (%v, %v). Want %v, got %v.", ci, cj, cov, c.At(ci, cj))
@@ -87,38 +87,38 @@ func TestCovarianceMatrix(t *testing.T) {
 }

 }
-if !Panics(func() { CovarianceMatrix(nil, mat64.NewDense(5, 2, nil), []float64{}) }) {
+if !Panics(func() { CovarianceMatrix(nil, mat.NewDense(5, 2, nil), []float64{}) }) {
 t.Errorf("CovarianceMatrix did not panic with weight size mismatch")
 }
-if !Panics(func() { CovarianceMatrix(mat64.NewSymDense(1, nil), mat64.NewDense(5, 2, nil), nil) }) {
+if !Panics(func() { CovarianceMatrix(mat.NewSymDense(1, nil), mat.NewDense(5, 2, nil), nil) }) {
 t.Errorf("CovarianceMatrix did not panic with preallocation size mismatch")
 }
-if !Panics(func() { CovarianceMatrix(nil, mat64.NewDense(2, 2, []float64{1, 2, 3, 4}), []float64{1, -1}) }) {
+if !Panics(func() { CovarianceMatrix(nil, mat.NewDense(2, 2, []float64{1, 2, 3, 4}), []float64{1, -1}) }) {
 t.Errorf("CovarianceMatrix did not panic with negative weights")
 }
 }

 func TestCorrelationMatrix(t *testing.T) {
 for i, test := range []struct {
-data *mat64.Dense
+data *mat.Dense
 weights []float64
-ans *mat64.Dense
+ans *mat.Dense
 }{
 {
-data: mat64.NewDense(3, 3, []float64{
+data: mat.NewDense(3, 3, []float64{
 1, 2, 3,
 3, 4, 5,
 5, 6, 7,
 }),
 weights: nil,
-ans: mat64.NewDense(3, 3, []float64{
+ans: mat.NewDense(3, 3, []float64{
 1, 1, 1,
 1, 1, 1,
 1, 1, 1,
 }),
 },
 {
-data: mat64.NewDense(5, 2, []float64{
+data: mat.NewDense(5, 2, []float64{
 -2, -4,
 -1, 2,
 0, 0,
@@ -126,12 +126,12 @@ func TestCorrelationMatrix(t *testing.T) {
 2, 4,
 }),
 weights: nil,
-ans: mat64.NewDense(2, 2, []float64{
+ans: mat.NewDense(2, 2, []float64{
 1, 0.6,
 0.6, 1,
 }),
 }, {
-data: mat64.NewDense(3, 2, []float64{
+data: mat.NewDense(3, 2, []float64{
 1, 1,
 2, 4,
 3, 9,
@@ -141,7 +141,7 @@ func TestCorrelationMatrix(t *testing.T) {
 1.5,
 1,
 },
-ans: mat64.NewDense(2, 2, []float64{
+ans: mat.NewDense(2, 2, []float64{
 1, 0.9868703275903379,
 0.9868703275903379, 1,
 }),
@@ -156,9 +156,9 @@ func TestCorrelationMatrix(t *testing.T) {
 if test.weights != nil {
 copy(w, test.weights)
 }
-for _, corr := range []*mat64.SymDense{nil, {}} {
+for _, corr := range []*mat.SymDense{nil, {}} {
 c := CorrelationMatrix(corr, test.data, test.weights)
-if !mat64.Equal(c, test.ans) {
+if !mat.Equal(c, test.ans) {
 t.Errorf("%d: expected corr %v, found %v", i, test.ans, c)
 }
 if !floats.Equal(d, r.Data) {
@@ -172,8 +172,8 @@ func TestCorrelationMatrix(t *testing.T) {
 _, cols := c.Dims()
 for ci := 0; ci < cols; ci++ {
 for cj := 0; cj < cols; cj++ {
-x := mat64.Col(nil, ci, test.data)
-y := mat64.Col(nil, cj, test.data)
+x := mat.Col(nil, ci, test.data)
+y := mat.Col(nil, cj, test.data)
 corr := Correlation(x, y, test.weights)
 if math.Abs(corr-c.At(ci, cj)) > 1e-14 {
 t.Errorf("CorrMat does not match at (%v, %v). Want %v, got %v.", ci, cj, corr, c.At(ci, cj))
@@ -183,13 +183,13 @@ func TestCorrelationMatrix(t *testing.T) {
 }

 }
-if !Panics(func() { CorrelationMatrix(nil, mat64.NewDense(5, 2, nil), []float64{}) }) {
+if !Panics(func() { CorrelationMatrix(nil, mat.NewDense(5, 2, nil), []float64{}) }) {
 t.Errorf("CorrelationMatrix did not panic with weight size mismatch")
 }
-if !Panics(func() { CorrelationMatrix(mat64.NewSymDense(1, nil), mat64.NewDense(5, 2, nil), nil) }) {
+if !Panics(func() { CorrelationMatrix(mat.NewSymDense(1, nil), mat.NewDense(5, 2, nil), nil) }) {
 t.Errorf("CorrelationMatrix did not panic with preallocation size mismatch")
 }
-if !Panics(func() { CorrelationMatrix(nil, mat64.NewDense(2, 2, []float64{1, 2, 3, 4}), []float64{1, -1}) }) {
+if !Panics(func() { CorrelationMatrix(nil, mat.NewDense(2, 2, []float64{1, 2, 3, 4}), []float64{1, -1}) }) {
 t.Errorf("CorrelationMatrix did not panic with negative weights")
 }
 }
@@ -197,11 +197,11 @@ func TestCorrelationMatrix(t *testing.T) {
 func TestCorrCov(t *testing.T) {
 // test both Cov2Corr and Cov2Corr
 for i, test := range []struct {
-data *mat64.Dense
+data *mat.Dense
 weights []float64
 }{
 {
-data: mat64.NewDense(3, 3, []float64{
+data: mat.NewDense(3, 3, []float64{
 1, 2, 3,
 3, 4, 5,
 5, 6, 7,
@@ -209,7 +209,7 @@ func TestCorrCov(t *testing.T) {
 weights: nil,
 },
 {
-data: mat64.NewDense(5, 2, []float64{
+data: mat.NewDense(5, 2, []float64{
 -2, -4,
 -1, 2,
 0, 0,
@@ -218,7 +218,7 @@ func TestCorrCov(t *testing.T) {
 }),
 weights: nil,
 }, {
-data: mat64.NewDense(3, 2, []float64{
+data: mat.NewDense(3, 2, []float64{
 1, 1,
 2, 4,
 3, 9,
@@ -241,22 +241,22 @@ func TestCorrCov(t *testing.T) {
 sigmas[i] = math.Sqrt(cov.At(i, i))
 }

-covFromCorr := mat64.NewSymDense(corr.Symmetric(), nil)
+covFromCorr := mat.NewSymDense(corr.Symmetric(), nil)
 covFromCorr.CopySym(corr)
 corrToCov(covFromCorr, sigmas)

-corrFromCov := mat64.NewSymDense(cov.Symmetric(), nil)
+corrFromCov := mat.NewSymDense(cov.Symmetric(), nil)
 corrFromCov.CopySym(cov)
 covToCorr(corrFromCov)

-if !mat64.EqualApprox(corr, corrFromCov, 1e-14) {
+if !mat.EqualApprox(corr, corrFromCov, 1e-14) {
 t.Errorf("%d: corrToCov did not match direct Correlation calculation. Want: %v, got: %v. ", i, corr, corrFromCov)
 }
-if !mat64.EqualApprox(cov, covFromCorr, 1e-14) {
+if !mat.EqualApprox(cov, covFromCorr, 1e-14) {
 t.Errorf("%d: covToCorr did not match direct Covariance calculation. Want: %v, got: %v. ", i, cov, covFromCorr)
 }

-if !Panics(func() { corrToCov(mat64.NewSymDense(2, nil), []float64{}) }) {
+if !Panics(func() { corrToCov(mat.NewSymDense(2, nil), []float64{}) }) {
 t.Errorf("CorrelationMatrix did not panic with sigma size mismatch")
 }
 }
@@ -265,14 +265,14 @@ func TestCorrCov(t *testing.T) {
 func TestMahalanobis(t *testing.T) {
 // Comparison with scipy.
 for cas, test := range []struct {
-x, y *mat64.Vector
+x, y *mat.Vector
-Sigma *mat64.SymDense
+Sigma *mat.SymDense
 ans float64
 }{
 {
-x: mat64.NewVector(3, []float64{1, 2, 3}),
+x: mat.NewVector(3, []float64{1, 2, 3}),
-y: mat64.NewVector(3, []float64{0.8, 1.1, -1}),
+y: mat.NewVector(3, []float64{0.8, 1.1, -1}),
-Sigma: mat64.NewSymDense(3,
+Sigma: mat.NewSymDense(3,
 []float64{
 0.8, 0.3, 0.1,
 0.3, 0.7, -0.1,
@@ -280,7 +280,7 @@ func TestMahalanobis(t *testing.T) {
 ans: 1.9251757377680914,
 },
 } {
-var chol mat64.Cholesky
+var chol mat.Cholesky
 ok := chol.Factorize(test.Sigma)
 if !ok {
 panic("bad test")
@@ -294,21 +294,21 @@ func TestMahalanobis(t *testing.T) {

 // benchmarks

-func randMat(r, c int) mat64.Matrix {
+func randMat(r, c int) mat.Matrix {
 x := make([]float64, r*c)
 for i := range x {
 x[i] = rand.Float64()
 }
-return mat64.NewDense(r, c, x)
+return mat.NewDense(r, c, x)
 }

-func benchmarkCovarianceMatrix(b *testing.B, m mat64.Matrix) {
+func benchmarkCovarianceMatrix(b *testing.B, m mat.Matrix) {
 b.ResetTimer()
 for i := 0; i < b.N; i++ {
 CovarianceMatrix(nil, m, nil)
 }
 }
-func benchmarkCovarianceMatrixWeighted(b *testing.B, m mat64.Matrix) {
+func benchmarkCovarianceMatrixWeighted(b *testing.B, m mat.Matrix) {
 r, _ := m.Dims()
 wts := make([]float64, r)
 for i := range wts {
@@ -319,9 +319,9 @@ func benchmarkCovarianceMatrixWeighted(b *testing.B, m mat64.Matrix) {
 CovarianceMatrix(nil, m, wts)
 }
 }
-func benchmarkCovarianceMatrixInPlace(b *testing.B, m mat64.Matrix) {
+func benchmarkCovarianceMatrixInPlace(b *testing.B, m mat.Matrix) {
 _, c := m.Dims()
-res := mat64.NewSymDense(c, nil)
+res := mat.NewSymDense(c, nil)
 b.ResetTimer()
 for i := 0; i < b.N; i++ {
 CovarianceMatrix(res, m, nil)
@@ -434,7 +434,7 @@ func BenchmarkCovToCorr(b *testing.B) {
 // generate a 10x10 covariance matrix
 m := randMat(small, small)
 c := CovarianceMatrix(nil, m, nil)
-cc := mat64.NewSymDense(c.Symmetric(), nil)
+cc := mat.NewSymDense(c.Symmetric(), nil)
 b.ResetTimer()
 for i := 0; i < b.N; i++ {
 b.StopTimer()
@@ -448,7 +448,7 @@ func BenchmarkCorrToCov(b *testing.B) {
 // generate a 10x10 correlation matrix
 m := randMat(small, small)
 c := CorrelationMatrix(nil, m, nil)
-cc := mat64.NewSymDense(c.Symmetric(), nil)
+cc := mat.NewSymDense(c.Symmetric(), nil)
 sigma := make([]float64, small)
 for i := range sigma {
 sigma[i] = 2