graph: use slices package for sorting and reversing slices

Author: Jonathan Bluett-Duncan
Date: 2024-03-06 22:38:22 +00:00
Committed by: Dan Kortschak
Parent: a9b228ed6b
Commit: bdcda9a453
29 changed files with 224 additions and 295 deletions
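
The same three rewrites recur throughout the change: plain sorts of ordered slices move from sort.Strings, sort.Ints, and sort.Slice to slices.Sort; hand-written sort.Interface implementations become slices.SortFunc with comparison functions built from cmp.Compare; and sortedness checks such as sort.Float64sAreSorted become slices.IsSorted. The sketch below is not part of the commit; it is a minimal, self-contained illustration of those patterns using only the standard library (Go 1.21 or later).

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type edge struct{ from, to int64 }

func main() {
	// sort.Strings(keys), sort.Ints(xs) and sort.Slice with a less func
	// all become slices.Sort when the element type is ordered.
	keys := []string{"b", "a", "c"}
	slices.Sort(keys)

	// A hand-written sort.Interface becomes slices.SortFunc with a
	// comparison built from cmp.Compare.
	edges := []edge{{2, 1}, {1, 3}, {1, 2}}
	slices.SortFunc(edges, func(a, b edge) int {
		if n := cmp.Compare(a.from, b.from); n != 0 {
			return n
		}
		return cmp.Compare(a.to, b.to)
	})

	// sort.Float64sAreSorted after a reversal becomes slices.Reverse
	// followed by slices.IsSorted.
	qs := []float64{3, 2, 1}
	slices.Reverse(qs)

	fmt.Println(keys, edges, slices.IsSorted(qs))
	// Output: [a b c] [{1 2} {1 3} {2 1}] true
}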

View File

@@ -9,6 +9,7 @@ package coloring
import (
"errors"
"slices"
"sort"
"golang.org/x/exp/rand"
@@ -31,7 +32,7 @@ func Sets(colors map[int64]int) map[int][]int64 {
sets[c] = append(sets[c], id)
}
for _, s := range sets {
sort.Slice(s, func(i, j int) bool { return s[i] < s[j] })
slices.Sort(s)
}
return sets
}

View File

@@ -6,7 +6,7 @@ package community
import (
"fmt"
"sort"
"slices"
"golang.org/x/exp/rand"
@@ -347,7 +347,7 @@ func newSlice(s set.Ints[int]) *slice {
for i := range s {
elems = append(elems, i)
}
sort.Ints(elems)
slices.Sort(elems)
return &slice{elems: elems}
}

View File

@@ -6,7 +6,7 @@ package community
import (
"math"
"sort"
"slices"
"golang.org/x/exp/rand"
@@ -617,7 +617,7 @@ func (l *directedLocalMover) deltaQ(n graph.Node) (deltaQ float64, dst int, src
for i := range connected {
candidates = append(candidates, i)
}
sort.Ints(candidates)
slices.Sort(candidates)
// Calculate the highest modularity gain
// from moving into another community and

View File

@@ -9,7 +9,6 @@ import (
"math"
"reflect"
"slices"
"sort"
"testing"
"golang.org/x/exp/rand"
@@ -609,7 +608,7 @@ func TestLouvainDirectedMultiplex(t *testing.T) {
}
// Recovery of Q values is reversed.
if slices.Reverse(qs); !sort.Float64sAreSorted(qs) {
if slices.Reverse(qs); !slices.IsSorted(qs) {
t.Errorf("Q values not monotonically increasing: %.5v", qs)
}
}

View File

@@ -8,7 +8,6 @@ import (
"math"
"reflect"
"slices"
"sort"
"testing"
"golang.org/x/exp/rand"
@@ -632,7 +631,7 @@ func testModularizeDirected(t *testing.T, test communityDirectedQTest, g graph.D
}
// Recovery of Q values is reversed.
if slices.Reverse(qs); !sort.Float64sAreSorted(qs) {
if slices.Reverse(qs); !slices.IsSorted(qs) {
t.Errorf("Q values not monotonically increasing: %.5v", qs)
}
}

View File

@@ -6,7 +6,7 @@ package community
import (
"math"
"sort"
"slices"
"golang.org/x/exp/rand"
@@ -554,7 +554,7 @@ func (l *undirectedLocalMover) deltaQ(n graph.Node) (deltaQ float64, dst int, sr
for i := range connected {
candidates = append(candidates, i)
}
sort.Ints(candidates)
slices.Sort(candidates)
// Calculate the highest modularity gain
// from moving into another community and

View File

@@ -9,7 +9,6 @@ import (
"math"
"reflect"
"slices"
"sort"
"testing"
"golang.org/x/exp/rand"
@@ -578,7 +577,7 @@ func TestLouvainMultiplex(t *testing.T) {
}
// Recovery of Q values is reversed.
if slices.Reverse(qs); !sort.Float64sAreSorted(qs) {
if slices.Reverse(qs); !slices.IsSorted(qs) {
t.Errorf("Q values not monotonically increasing: %.5v", qs)
}
}

View File

@@ -8,7 +8,6 @@ import (
"math"
"reflect"
"slices"
"sort"
"testing"
"golang.org/x/exp/rand"
@@ -695,7 +694,7 @@ func testModularizeUndirected(t *testing.T, test communityUndirectedQTest, g gra
}
// Recovery of Q values is reversed.
if slices.Reverse(qs); !sort.Float64sAreSorted(qs) {
if slices.Reverse(qs); !slices.IsSorted(qs) {
t.Errorf("Q values not monotonically increasing: %.5v", qs)
}
}

View File

@@ -9,7 +9,7 @@ import (
"errors"
"fmt"
"os/exec"
"sort"
"slices"
"strconv"
"strings"
"testing"
@@ -211,7 +211,7 @@ func (a attributes) Attributes() []encoding.Attribute {
for k := range a {
keys = append(keys, k)
}
sort.Strings(keys)
slices.Sort(keys)
attr := make([]encoding.Attribute, 0, len(keys))
for _, k := range keys {
v := a[k]

View File

@@ -10,7 +10,7 @@ package rdf
import (
"fmt"
"os"
"sort"
"slices"
"strings"
"text/tabwriter"
)
@@ -47,7 +47,7 @@ func (d debugger) logHashes(depth int, hashes map[string][]byte, size int) {
keys[i] = k
i++
}
sort.Strings(keys)
slices.Sort(keys)
w := tabwriter.NewWriter(os.Stderr, 0, 4, 8, ' ', 0)
for _, k := range keys {
fmt.Fprintf(w, prefix+"%s\t%0*x\n", k, 2*size, hashes[k])

View File

@@ -7,7 +7,7 @@ package rdf_test
import (
"io"
"math"
"sort"
"slices"
"strings"
"testing"
@@ -230,7 +230,7 @@ func TestRemoveStatement(t *testing.T) {
for it.Next() {
gotStatements = append(gotStatements, it.Statement().String())
}
sort.Strings(gotStatements)
slices.Sort(gotStatements)
got := strings.TrimSpace(strings.Join(gotStatements, "\n"))
want := strings.TrimSpace(test.want)
@@ -349,7 +349,7 @@ func TestRemoveTerm(t *testing.T) {
for it.Next() {
gotStatements = append(gotStatements, it.Statement().String())
}
sort.Strings(gotStatements)
slices.Sort(gotStatements)
got := strings.TrimSpace(strings.Join(gotStatements, "\n"))
want := strings.TrimSpace(test.want)

View File

@@ -6,10 +6,14 @@ package rdf
import (
"bytes"
"cmp"
"errors"
"fmt"
"hash"
"slices"
"sort"
"gonum.org/v1/gonum/internal/order"
)
// See "Canonical Forms for Isomorphic and Equivalent RDF Graphs: Algorithms
@@ -53,7 +57,7 @@ func lexicalHashes(dst [][]byte, hashes map[string][]byte) {
dst[i] = s
i++
}
sort.Sort(lexical(dst))
order.BySliceValues(dst)
}
// IsoCanonicalHashes returns a mapping between the nodes of the RDF graph
@@ -185,7 +189,7 @@ func C14n(dst, src []*Statement, terms map[string]map[string]bool) ([]*Statement
blanks[i] = h
i++
}
sort.Strings(blanks)
slices.Sort(blanks)
c14n := make(map[string]string)
for i, b := range blanks {
@@ -218,7 +222,7 @@ func C14n(dst, src []*Statement, terms map[string]map[string]bool) ([]*Statement
n.Object = Term{Value: translate(s.Object.Value, c14n)}
n.Label = Term{Value: translate(s.Label.Value, c14n)}
}
sort.Sort(c14nStatements(dst))
sortC14nStatements(dst)
return dst, nil
}
@@ -230,33 +234,20 @@ func translate(term string, mapping map[string]string) string {
return term
}
type c14nStatements []*Statement
func (s c14nStatements) Len() int { return len(s) }
func (s c14nStatements) Less(i, j int) bool {
	si := s[i]
	sj := s[j]
	switch {
	case si.Subject.Value < sj.Subject.Value:
		return true
	case si.Subject.Value > sj.Subject.Value:
		return false
	}
	switch { // Always IRI.
	case si.Predicate.Value < sj.Predicate.Value:
		return true
	case si.Predicate.Value > sj.Predicate.Value:
		return false
	}
	switch {
	case si.Object.Value < sj.Object.Value:
		return true
	case si.Object.Value > sj.Object.Value:
		return false
	}
	return si.Label.Value < sj.Label.Value
}
func (s c14nStatements) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func sortC14nStatements(statements []*Statement) {
	slices.SortFunc(statements, func(a, b *Statement) int {
		if n := cmp.Compare(a.Subject.Value, b.Subject.Value); n != 0 {
			return n
		}
		// Always IRI.
		if n := cmp.Compare(a.Predicate.Value, b.Predicate.Value); n != 0 {
			return n
		}
		return cmp.Compare(a.Object.Value, b.Object.Value)
	})
}
// hashBNodes returns the hashed blank nodes of the graph described by statements
// using the provided hash function. Hashes are initialised with zero.
@@ -497,20 +488,13 @@ func (b hashBag) add(term string, hash []byte) {
// state and returns the hash.
func (b hashBag) sum(term string) []byte {
p := b.hashesFor[term]
sort.Sort(lexical(p))
order.BySliceValues(p)
h := hashTuple(b.hash, p...)
b.hashesFor[term] = b.hashesFor[term][:1]
b.hashesFor[term][0] = h
return h
}
// lexical implements lexical sorting of [][]byte.
type lexical [][]byte
func (b lexical) Len() int { return len(b) }
func (b lexical) Less(i, j int) bool { return string(b[i]) < string(b[j]) }
func (b lexical) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// hashTuple returns the h hash of the concatenation of t.
func hashTuple(h hash.Hash, t ...[]byte) []byte {
h.Reset()
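
The local lexical type removed above was a sort.Interface over [][]byte; its replacement, order.BySliceValues, is the internal helper shown near the end of this commit, which orders slices of cmp.Ordered values element-wise and then by length. As a hypothetical stand-alone equivalent for the [][]byte case (an assumed illustration, not gonum code), the same ordering can be obtained with slices.SortFunc and bytes.Compare:

package main

import (
	"bytes"
	"fmt"
	"slices"
)

func main() {
	hashes := [][]byte{{0x02}, {0x01, 0xff}, {0x01}}
	// Element-wise comparison followed by a length comparison is plain
	// bytewise lexicographic order for []byte, so bytes.Compare suffices.
	slices.SortFunc(hashes, bytes.Compare)
	fmt.Printf("%x\n", hashes)
	// Output: [01 01ff 02]
}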

View File

@@ -9,7 +9,7 @@ import (
"fmt"
"log"
"os"
"sort"
"slices"
"strings"
"text/tabwriter"
@@ -76,7 +76,7 @@ _:greet <l:is> "hola"@es .
blanks = append(blanks, k)
}
}
sort.Strings(blanks)
slices.Sort(blanks)
if len(blanks) == 0 {
fmt.Println("No blank nodes.")
@@ -163,7 +163,7 @@ _:c1 <ex:r> _:d1 .
blanks = append(blanks, k)
}
}
sort.Strings(blanks)
slices.Sort(blanks)
if len(blanks) == 0 {
fmt.Println("No blank nodes.")

View File

@@ -5,6 +5,7 @@
package rdf
import (
"cmp"
"crypto/md5"
"flag"
"fmt"
@@ -13,6 +14,7 @@ import (
"os"
"path/filepath"
"reflect"
"slices"
"sort"
"testing"
"text/tabwriter"
@@ -82,10 +84,10 @@ func TestIsoCanonicalHashes(t *testing.T) {
}
if last != nil {
last := relabelStatements(statements, termsFor(last, hash))
sort.Sort(simpleLexicalStatements(last))
sortSimpleLexicalStatements(last)
curr := relabelStatements(statements, termsFor(curr, hash))
sort.Sort(simpleLexicalStatements(curr))
sortSimpleLexicalStatements(curr)
if !reflect.DeepEqual(last, curr) {
t.Errorf("IsoCanonicalHashes was not stable between runs on %q with decomp=%t",
@@ -154,7 +156,7 @@ func TestIsoCanonicalHashes(t *testing.T) {
// Relabel a copy of the statements and then sort.
orig := relabelStatements(statements, termsFor(hashes, hash))
sort.Sort(simpleLexicalStatements(orig))
sortSimpleLexicalStatements(orig)
for _, perm := range []struct {
name string
@@ -216,7 +218,7 @@ func TestIsoCanonicalHashes(t *testing.T) {
keys[i] = k
i++
}
sort.Strings(keys)
slices.Sort(keys)
w := tabwriter.NewWriter(os.Stderr, 0, 4, 8, ' ', 0)
for _, k := range keys {
fmt.Fprintf(w, "\t%s\t%s\n", k, translate(k, terms))
@@ -227,7 +229,7 @@ func TestIsoCanonicalHashes(t *testing.T) {
// Relabel a copy of the alternative statements and then sort.
alt := relabelStatements(altStatements, termsFor(altHashes, hash))
sort.Sort(simpleLexicalStatements(alt))
sortSimpleLexicalStatements(alt)
for i := range statements {
if *orig[i] != *alt[i] { // Otherwise we have pointer inequality.
@@ -290,7 +292,7 @@ func permuteBlanks(s []*Statement, src rand.Source) ([]*Statement, map[string]st
blanks = append(blanks, t)
}
}
sort.Strings(blanks)
slices.Sort(blanks)
for x, y := range rnd.Perm(len(blanks)) {
terms[blanks[x]] = blanks[y]
}
@@ -369,7 +371,7 @@ func mergeFirst2B(s []*Statement) ([]*Statement, map[string]string) {
blanks[i] = b
i++
}
sort.Strings(blanks)
slices.Sort(blanks)
terms[blanks[1]] = terms[blanks[0]]
m := relabelStatements(s, terms)
@@ -428,7 +430,7 @@ func TestLexicalStatements(t *testing.T) {
// Relabel a copy of the statements and then sort.
direct := relabelStatements(statements, terms)
sort.Sort(simpleLexicalStatements(direct))
sortSimpleLexicalStatements(direct)
for i := range statements {
if *indirect[i] != *direct[i] { // Otherwise we have pointer inequality.
@@ -450,36 +452,21 @@ func termsFor(hashes map[string][]byte, hash hash.Hash) map[string]string {
return terms
}
// simpleLexicalStatements implements lexical statement sorting on the
// sortSimpleLexicalStatements implements lexical statement sorting on the
// literal values without interpolation.
type simpleLexicalStatements []*Statement
func (s simpleLexicalStatements) Len() int { return len(s) }
func (s simpleLexicalStatements) Less(i, j int) bool {
	si := s[i]
	sj := s[j]
	switch {
	case unquoteIRI(si.Subject.Value) < unquoteIRI(sj.Subject.Value):
		return true
	case unquoteIRI(si.Subject.Value) > unquoteIRI(sj.Subject.Value):
		return false
	}
	switch { // Always IRI.
	case si.Predicate.Value < sj.Predicate.Value:
		return true
	case si.Predicate.Value > sj.Predicate.Value:
		return false
	}
	switch {
	case unquoteIRI(si.Object.Value) < unquoteIRI(sj.Object.Value):
		return true
	case unquoteIRI(si.Object.Value) > unquoteIRI(sj.Object.Value):
		return false
	}
	return unquoteIRI(si.Label.Value) < unquoteIRI(sj.Label.Value)
}
func (s simpleLexicalStatements) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func sortSimpleLexicalStatements(statements []*Statement) {
	slices.SortFunc(statements, func(a, b *Statement) int {
		if n := cmp.Compare(unquoteIRI(a.Subject.Value), unquoteIRI(b.Subject.Value)); n != 0 {
			return n
		}
		// Always IRI.
		if n := cmp.Compare(unquoteIRI(a.Predicate.Value), unquoteIRI(b.Predicate.Value)); n != 0 {
			return n
		}
		return cmp.Compare(unquoteIRI(a.Object.Value), unquoteIRI(b.Object.Value))
	})
}
func relabelStatements(s []*Statement, terms map[string]string) []*Statement {

View File

@@ -5,9 +5,8 @@
package rdf
import (
"sort"
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/internal/order"
)
// Query represents a step in an RDF graph query. The methods on Query
@@ -147,8 +146,8 @@ func (q Query) And(p Query) Query {
if q.g != p.g {
panic("rdf: binary query operation parameters from distinct graphs")
}
sortByID(q.terms)
sortByID(p.terms)
order.ByID(q.terms)
order.ByID(p.terms)
r := Query{g: q.g}
var i, j int
for i < len(q.terms) && j < len(p.terms) {
@@ -173,8 +172,8 @@ func (q Query) Or(p Query) Query {
if q.g != p.g {
panic("rdf: binary query operation parameters from distinct graphs")
}
sortByID(q.terms)
sortByID(p.terms)
order.ByID(q.terms)
order.ByID(p.terms)
r := Query{g: q.g}
var i, j int
for i < len(q.terms) && j < len(p.terms) {
@@ -209,8 +208,8 @@ func (q Query) Not(p Query) Query {
if q.g != p.g {
panic("rdf: binary query operation parameters from distinct graphs")
}
sortByID(q.terms)
sortByID(p.terms)
order.ByID(q.terms)
order.ByID(p.terms)
r := Query{g: q.g}
var i, j int
for i < len(q.terms) && j < len(p.terms) {
@@ -257,7 +256,7 @@ func (q Query) Repeat(fn func(Query) (q Query, ok bool)) Query {
// Unique returns a copy of the receiver that contains only one instance
// of each term.
func (q Query) Unique() Query {
sortByID(q.terms)
order.ByID(q.terms)
r := Query{g: q.g}
for i, t := range q.terms {
if i == 0 || t.UID != q.terms[i-1].UID {
@@ -276,7 +275,3 @@ func (q Query) Len() int {
func (q Query) Result() []Term {
return q.terms
}
func sortByID(terms []Term) {
sort.Slice(terms, func(i, j int) bool { return terms[i].ID() < terms[j].ID() })
}

View File

@@ -11,6 +11,8 @@ import (
"testing"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/internal/order"
)
var andTests = []struct {
@@ -58,8 +60,8 @@ func TestQueryAnd(t *testing.T) {
b := Query{terms: permutedTerms(test.b, src)}
got := a.And(b).Result()
sortByID(got)
sortByID(test.want)
order.ByID(got)
order.ByID(test.want)
if !reflect.DeepEqual(got, test.want) {
t.Errorf("unexpected result for test %q:\ngot: %v\nwant:%v",
@@ -114,8 +116,8 @@ func TestQueryOr(t *testing.T) {
b := Query{terms: permutedTerms(test.b, src)}
got := a.Or(b).Result()
sortByID(got)
sortByID(test.want)
order.ByID(got)
order.ByID(test.want)
if !reflect.DeepEqual(got, test.want) {
t.Errorf("unexpected result for test %q:\ngot: %v\nwant:%v",
@@ -170,8 +172,8 @@ func TestQueryNot(t *testing.T) {
b := Query{terms: permutedTerms(test.b, src)}
got := a.Not(b).Result()
sortByID(got)
sortByID(test.want)
order.ByID(got)
order.ByID(test.want)
if !reflect.DeepEqual(got, test.want) {
t.Errorf("unexpected result for test %q:\ngot: %v\nwant:%v",
@@ -276,8 +278,8 @@ func TestQueryUnique(t *testing.T) {
a := Query{terms: permutedTerms(test.in, src)}
got := a.Unique().Result()
sortByID(got)
sortByID(test.want)
order.ByID(got)
order.ByID(test.want)
if !reflect.DeepEqual(got, test.want) {
t.Errorf("unexpected result for test %q:\ngot: %v\nwant:%v",
@@ -424,8 +426,8 @@ func TestQueryHasAllOut(t *testing.T) {
a := Query{g: g, terms: test.in}
got := a.HasAllOut(test.fn).Result()
sortByID(got)
sortByID(test.wantAll)
order.ByID(got)
order.ByID(test.wantAll)
if !reflect.DeepEqual(got, test.wantAll) {
t.Errorf("unexpected result for test %q:\ngot: %v\nwant:%v",
@@ -433,7 +435,7 @@ func TestQueryHasAllOut(t *testing.T) {
}
cons := test.cons(a).Result()
sortByID(cons)
order.ByID(cons)
if !reflect.DeepEqual(got, cons) {
t.Errorf("unexpected construction result for test %q:\ngot: %v\nwant:%v",
test.name, got, cons)
@@ -463,8 +465,8 @@ func TestQueryHasAnyOut(t *testing.T) {
a := Query{g: g, terms: test.in}
got := a.HasAnyOut(test.fn).Result()
sortByID(got)
sortByID(test.wantAny)
order.ByID(got)
order.ByID(test.wantAny)
if !reflect.DeepEqual(got, test.wantAny) {
t.Errorf("unexpected result for test %q:\ngot: %v\nwant:%v",
@@ -593,8 +595,8 @@ func TestQueryHasAllIn(t *testing.T) {
a := Query{g: g, terms: test.in}
got := a.HasAllIn(test.fn).Result()
sortByID(got)
sortByID(test.wantAll)
order.ByID(got)
order.ByID(test.wantAll)
if !reflect.DeepEqual(got, test.wantAll) {
t.Errorf("unexpected result for test %q:\ngot: %v\nwant:%v",
@@ -602,7 +604,7 @@ func TestQueryHasAllIn(t *testing.T) {
}
cons := test.cons(a).Result()
sortByID(cons)
order.ByID(cons)
if !reflect.DeepEqual(got, cons) {
t.Errorf("unexpected construction result for test %q:\ngot: %v\nwant:%v",
test.name, got, cons)
@@ -632,8 +634,8 @@ func TestQueryHasAnyIn(t *testing.T) {
a := Query{g: g, terms: test.in}
got := a.HasAnyIn(test.fn).Result()
sortByID(got)
sortByID(test.wantAny)
order.ByID(got)
order.ByID(test.wantAny)
if !reflect.DeepEqual(got, test.wantAny) {
t.Errorf("unexpected result for test %q:\ngot: %v\nwant:%v",

View File

@@ -11,7 +11,7 @@ import (
"errors"
"fmt"
"hash"
"sort"
"slices"
"gonum.org/v1/gonum/stat/combin"
)
@@ -23,7 +23,7 @@ func Deduplicate(s []*Statement) []*Statement {
if len(s) < 2 {
return s
}
sort.Sort(c14nStatements(s))
sortC14nStatements(s)
curr := 0
for i, e := range s {
if isSameStatement(e, s[curr]) {
@@ -217,7 +217,7 @@ func (u *urna) relabel(dst, src []*Statement) ([]*Statement, error) {
n.Object = Term{Value: translateURNA(s.Object.Value, u.canon.issued), UID: s.Object.UID}
n.Label = Term{Value: translateURNA(s.Label.Value, u.canon.issued), UID: s.Label.UID}
}
sort.Sort(c14nStatements(dst))
sortC14nStatements(dst)
return dst, nil
}
@@ -230,7 +230,7 @@ func lexicallySortedPathHashes(paths map[string][]*issuer) []string {
lexicalHashPaths[i] = h
i++
}
sort.Strings(lexicalHashPaths)
slices.Sort(lexicalHashPaths)
return lexicalHashPaths
}
@@ -273,7 +273,7 @@ func (u *urna) hashFirstDegreeQuads(b string) string {
statements = append(statements, &n)
}
sort.Sort(c14nStatements(statements)) // 4.
sortC14nStatements(statements) // 4.
// 5.
u.hash.Reset()
@@ -384,7 +384,7 @@ func lexicallySortedTermHashes(termsFor map[string][]string) []string {
lexicalHashes[i] = h
i++
}
sort.Strings(lexicalHashes)
slices.Sort(lexicalHashes)
return lexicalHashes
}

View File

@@ -6,12 +6,12 @@ package iterator_test
import (
"reflect"
"sort"
"testing"
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/graph/iterator"
"gonum.org/v1/gonum/graph/simple"
"gonum.org/v1/gonum/internal/order"
)
type line struct{ f, t, id int64 }
@@ -62,8 +62,8 @@ func TestLinesSlice(t *testing.T) {
for _, l := range test.lines {
want = append(want, l)
}
sort.Slice(got, func(i, j int) bool { return got[i].ID() < got[j].ID() })
sort.Slice(want, func(i, j int) bool { return want[i].ID() < want[j].ID() })
order.ByID(got)
order.ByID(want)
if !reflect.DeepEqual(got, want) {
t.Errorf("unexpected iterator output for round %d: got:%#v want:%#v", i, got, want)
}
@@ -167,8 +167,8 @@ func TestWeightedLinesSlice(t *testing.T) {
for _, l := range test.lines {
want = append(want, l)
}
sort.Slice(got, func(i, j int) bool { return got[i].ID() < got[j].ID() })
sort.Slice(want, func(i, j int) bool { return want[i].ID() < want[j].ID() })
order.ByID(got)
order.ByID(want)
if !reflect.DeepEqual(got, want) {
t.Errorf("unexpected iterator output for round %d: got:%#v want:%#v", i, got, want)
}

View File

@@ -5,9 +5,10 @@
package network
import (
"cmp"
"fmt"
"math"
"sort"
"slices"
"testing"
"gonum.org/v1/gonum/floats/scalar"
@@ -311,11 +312,16 @@ func TestEdgeBetweennessWeighted(t *testing.T) {
}
func orderedPairFloats(w map[[2]int64]float64, prec int) []pairKeyFloatVal {
o := make(orderedPairFloatsMap, 0, len(w))
o := make([]pairKeyFloatVal, 0, len(w))
for k, v := range w {
o = append(o, pairKeyFloatVal{prec: prec, key: k, val: v})
}
sort.Sort(o)
slices.SortFunc(o, func(a, b pairKeyFloatVal) int {
if n := cmp.Compare(a.key[0], b.key[0]); n != 0 {
return n
}
return cmp.Compare(a.key[1], b.key[1])
})
return o
}
@@ -328,11 +334,3 @@ type pairKeyFloatVal struct {
func (kv pairKeyFloatVal) String() string {
return fmt.Sprintf("(%c,%c):%.*f", kv.key[0]+'A', kv.key[1]+'A', kv.prec, kv.val)
}
type orderedPairFloatsMap []pairKeyFloatVal
func (o orderedPairFloatsMap) Len() int { return len(o) }
func (o orderedPairFloatsMap) Less(i, j int) bool {
return o[i].key[0] < o[j].key[0] || (o[i].key[0] == o[j].key[0] && o[i].key[1] < o[j].key[1])
}
func (o orderedPairFloatsMap) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

View File

@@ -5,9 +5,10 @@
package network
import (
"cmp"
"fmt"
"math"
"sort"
"slices"
"testing"
"gonum.org/v1/gonum/floats/scalar"
@@ -71,11 +72,11 @@ func TestHITS(t *testing.T) {
}
func orderedHubAuth(w map[int64]HubAuthority, prec int) []keyHubAuthVal {
o := make(orderedHubAuthMap, 0, len(w))
o := make([]keyHubAuthVal, 0, len(w))
for k, v := range w {
o = append(o, keyHubAuthVal{prec: prec, key: k, val: v})
}
sort.Sort(o)
slices.SortFunc(o, func(a, b keyHubAuthVal) int { return cmp.Compare(a.key, b.key) })
return o
}
@@ -90,9 +91,3 @@ func (kv keyHubAuthVal) String() string {
kv.key, kv.prec, kv.val.Hub, kv.prec, kv.val.Authority,
)
}
type orderedHubAuthMap []keyHubAuthVal
func (o orderedHubAuthMap) Len() int { return len(o) }
func (o orderedHubAuthMap) Less(i, j int) bool { return o[i].key < o[j].key }
func (o orderedHubAuthMap) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

View File

@@ -5,9 +5,10 @@
package network
import (
"cmp"
"fmt"
"math"
"sort"
"slices"
"testing"
"gonum.org/v1/gonum/floats/scalar"
@@ -245,11 +246,13 @@ func TestEdgeWeightedPageRankSparse(t *testing.T) {
}
func orderedFloats(w map[int64]float64, prec int) []keyFloatVal {
o := make(orderedFloatsMap, 0, len(w))
o := make([]keyFloatVal, 0, len(w))
for k, v := range w {
o = append(o, keyFloatVal{prec: prec, key: k, val: v})
}
sort.Sort(o)
slices.SortFunc(o, func(a, b keyFloatVal) int {
return cmp.Compare(a.key, b.key)
})
return o
}
@@ -260,9 +263,3 @@ type keyFloatVal struct {
}
func (kv keyFloatVal) String() string { return fmt.Sprintf("%c:%.*f", kv.key+'A', kv.prec, kv.val) }
type orderedFloatsMap []keyFloatVal
func (o orderedFloatsMap) Len() int { return len(o) }
func (o orderedFloatsMap) Less(i, j int) bool { return o[i].key < o[j].key }
func (o orderedFloatsMap) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

View File

@@ -6,9 +6,10 @@ package dynamic
import (
"bytes"
"cmp"
"fmt"
"io"
"sort"
"slices"
"text/tabwriter"
"gonum.org/v1/gonum/graph/path/internal/testgraphs"
@@ -131,7 +132,12 @@ func (d *dumper) printEdges(format string, edges []simple.WeightedEdge) {
return
}
var buf bytes.Buffer
sort.Sort(lexically(edges))
slices.SortFunc(edges, func(a, b simple.WeightedEdge) int {
if n := cmp.Compare(a.From().ID(), b.From().ID()); n != 0 {
return n
}
return cmp.Compare(a.To().ID(), b.To().ID())
})
for i, e := range edges {
if i != 0 {
fmt.Fprint(&buf, ", ")
@@ -143,11 +149,3 @@ func (d *dumper) printEdges(format string, edges []simple.WeightedEdge) {
}
fmt.Fprintf(d.w, format, buf.Bytes())
}
type lexically []simple.WeightedEdge
func (l lexically) Len() int { return len(l) }
func (l lexically) Less(i, j int) bool {
return l[i].From().ID() < l[j].From().ID() || (l[i].From().ID() == l[j].From().ID() && l[i].To().ID() < l[j].To().ID())
}
func (l lexically) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

View File

@@ -5,9 +5,10 @@
package path
import (
"cmp"
"container/heap"
"math"
"sort"
"slices"
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/graph/simple"
@@ -163,7 +164,9 @@ type UndirectedWeightLister interface {
// If dst has nodes that exist in g, Kruskal will panic.
func Kruskal(dst WeightedBuilder, g UndirectedWeightLister) float64 {
edges := graph.WeightedEdgesOf(g.WeightedEdges())
sort.Sort(byWeight(edges))
slices.SortFunc(edges, func(a, b graph.WeightedEdge) int {
return cmp.Compare(a.Weight(), b.Weight())
})
ds := make(djSet)
it := g.Nodes()
@@ -183,9 +186,3 @@ func Kruskal(dst WeightedBuilder, g UndirectedWeightLister) float64 {
}
return w
}
type byWeight []graph.WeightedEdge
func (e byWeight) Len() int { return len(e) }
func (e byWeight) Less(i, j int) bool { return e[i].Weight() < e[j].Weight() }
func (e byWeight) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

View File

@@ -5,8 +5,9 @@
package path
import (
"cmp"
"math"
"sort"
"slices"
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/graph/iterator"
@@ -103,7 +104,9 @@ func YenKShortestPaths(g graph.Graph, k int, cost float64, s, t graph.Node) [][]
break
}
sort.Sort(byPathWeight(pot))
slices.SortFunc(pot, func(a, b yenShortest) int {
return cmp.Compare(a.weight, b.weight)
})
best := pot[0]
if len(best.path) <= 1 || best.weight > cost {
break
@@ -134,12 +137,6 @@ type yenShortest struct {
weight float64
}
type byPathWeight []yenShortest
func (s byPathWeight) Len() int { return len(s) }
func (s byPathWeight) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byPathWeight) Less(i, j int) bool { return s[i].weight < s[j].weight }
// yenKSPAdjuster allows walked edges to be omitted from a graph
// without altering the embedded graph.
type yenKSPAdjuster struct {

View File

@@ -5,9 +5,10 @@
package path
import (
"cmp"
"math"
"reflect"
"sort"
"slices"
"testing"
"gonum.org/v1/gonum/graph"
@@ -370,11 +371,13 @@ func TestYenKSP(t *testing.T) {
got := YenKShortestPaths(g.(graph.Graph), test.k, test.cost, test.query.From(), test.query.To())
gotIDs := pathIDs(got)
paths := make(byPathWeight, len(gotIDs))
paths := make([]yenShortest, len(gotIDs))
for i, p := range got {
paths[i] = yenShortest{path: p, weight: pathWeight(p, g.(graph.Weighted))}
}
if !sort.IsSorted(paths) {
if !slices.IsSortedFunc(paths, func(a, b yenShortest) int {
return cmp.Compare(a.weight, b.weight)
}) {
t.Errorf("unexpected result for %q: got:%+v", test.name, paths)
}
if test.relaxed {

View File

@@ -93,7 +93,7 @@ func ExampleModular_subgraphIsomorphism() {
mc := topo.BronKerbosch(p)
// Report the largest.
sort.Sort(byLength(mc))
sortByLengthDescending(mc)
max := len(mc[0])
w := tabwriter.NewWriter(os.Stdout, 5, 0, 0, ' ', tabwriter.AlignRight)
fmt.Println(" Adenine Guanine")
@@ -128,10 +128,6 @@ func ExampleModular_subgraphIsomorphism() {
// C 4 C 4
}
// byLength implements the sort.Interface, sorting the slices
// descending by length.
type byLength [][]graph.Node
func (n byLength) Len() int { return len(n) }
func (n byLength) Less(i, j int) bool { return len(n[i]) > len(n[j]) }
func (n byLength) Swap(i, j int) { n[i], n[j] = n[j], n[i] }
func sortByLengthDescending(mc [][]graph.Node) {
sort.Slice(mc, func(i, j int) bool { return len(mc[i]) > len(mc[j]) })
}

View File

@@ -7,9 +7,10 @@
package testgraph // import "gonum.org/v1/gonum/graph/testgraph"
import (
"cmp"
"fmt"
"reflect"
"sort"
"slices"
"testing"
"golang.org/x/exp/rand"
@@ -616,14 +617,14 @@ func checkEdges(t *testing.T, name string, g graph.Graph, got, want []Edge) {
t.Helper()
switch g.(type) {
case graph.Undirected:
sort.Sort(lexicalUndirectedEdges(got))
sort.Sort(lexicalUndirectedEdges(want))
sortLexicalUndirectedEdges(got)
sortLexicalUndirectedEdges(want)
if !undirectedEdgeSetEqual(got, want) {
t.Errorf("unexpected edges result for test %q:\ngot: %#v\nwant:%#v", name, got, want)
}
default:
sort.Sort(lexicalEdges(got))
sort.Sort(lexicalEdges(want))
sortLexicalEdges(got)
sortLexicalEdges(want)
if !reflect.DeepEqual(got, want) {
t.Errorf("unexpected edges result for test %q:\ngot: %#v\nwant:%#v", name, got, want)
}
@@ -1230,77 +1231,67 @@ func AdjacencyMatrix(t *testing.T, b Builder) {
}
}
// lexicalEdges sorts a collection of edges lexically on the
// sortLexicalEdges sorts a collection of edges lexically on the
// keys: from.ID > to.ID > [line.ID] > [weight].
type lexicalEdges []Edge

func (e lexicalEdges) Len() int { return len(e) }
func (e lexicalEdges) Less(i, j int) bool {
	if e[i].From().ID() < e[j].From().ID() {
		return true
	}
	sf := e[i].From().ID() == e[j].From().ID()
	if sf && e[i].To().ID() < e[j].To().ID() {
		return true
	}
	st := e[i].To().ID() == e[j].To().ID()
	li, oki := e[i].(graph.Line)
	lj, okj := e[j].(graph.Line)
	if oki != okj {
		panic(fmt.Sprintf("testgraph: mismatched types %T != %T", e[i], e[j]))
	}
	if !oki {
		return sf && st && lessWeight(e[i], e[j])
	}
	if sf && st && li.ID() < lj.ID() {
		return true
	}
	return sf && st && li.ID() == lj.ID() && lessWeight(e[i], e[j])
}
func (e lexicalEdges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

func sortLexicalEdges(edges []Edge) {
	slices.SortFunc(edges, func(a, b Edge) int {
		if n := cmp.Compare(a.From().ID(), b.From().ID()); n != 0 {
			return n
		}
		if n := cmp.Compare(a.To().ID(), b.To().ID()); n != 0 {
			return n
		}
		la, oka := a.(graph.Line)
		lb, okb := b.(graph.Line)
		if oka != okb {
			panic(fmt.Sprintf("testgraph: mismatched types %T != %T", a, b))
		}
		if oka {
			if n := cmp.Compare(la.ID(), lb.ID()); n != 0 {
				return n
			}
		}
		return cmpWeight(a, b)
	})
}

// lexicalUndirectedEdges sorts a collection of edges lexically on the
// sortLexicalUndirectedEdges sorts a collection of edges lexically on the
// keys: lo.ID > hi.ID > [line.ID] > [weight].
type lexicalUndirectedEdges []Edge

func (e lexicalUndirectedEdges) Len() int { return len(e) }
func (e lexicalUndirectedEdges) Less(i, j int) bool {
	lidi, hidi, _ := undirectedIDs(e[i])
	lidj, hidj, _ := undirectedIDs(e[j])
	if lidi < lidj {
		return true
	}
	sl := lidi == lidj
	if sl && hidi < hidj {
		return true
	}
	sh := hidi == hidj
	li, oki := e[i].(graph.Line)
	lj, okj := e[j].(graph.Line)
	if oki != okj {
		panic(fmt.Sprintf("testgraph: mismatched types %T != %T", e[i], e[j]))
	}
	if !oki {
		return sl && sh && lessWeight(e[i], e[j])
	}
	if sl && sh && li.ID() < lj.ID() {
		return true
	}
	return sl && sh && li.ID() == lj.ID() && lessWeight(e[i], e[j])
}
func (e lexicalUndirectedEdges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

func sortLexicalUndirectedEdges(edges []Edge) {
	slices.SortFunc(edges, func(a, b Edge) int {
		lida, hida, _ := undirectedIDs(a)
		lidb, hidb, _ := undirectedIDs(b)
		if n := cmp.Compare(lida, lidb); n != 0 {
			return n
		}
		if n := cmp.Compare(hida, hidb); n != 0 {
			return n
		}
		la, oka := a.(graph.Line)
		lb, okb := b.(graph.Line)
		if oka != okb {
			panic(fmt.Sprintf("testgraph: mismatched types %T != %T", a, b))
		}
		if oka {
			if n := cmp.Compare(la.ID(), lb.ID()); n != 0 {
				return n
			}
		}
		return cmpWeight(a, b)
	})
}

func lessWeight(ei, ej Edge) bool {
	wei, oki := ei.(graph.WeightedEdge)
	wej, okj := ej.(graph.WeightedEdge)
	if oki != okj {
		panic(fmt.Sprintf("testgraph: mismatched types %T != %T", ei, ej))
	}
	if !oki {
		return false
	}
	return wei.Weight() < wej.Weight()
}

func cmpWeight(a, b Edge) int {
	wea, oka := a.(graph.WeightedEdge)
	web, okb := b.(graph.WeightedEdge)
	if oka != okb {
		panic(fmt.Sprintf("testgraph: mismatched types %T != %T", a, b))
	}
	if !oka {
		return 0
	}
	return cmp.Compare(wea.Weight(), web.Weight())
}
// undirectedEdgeSetEqual returned whether a pair of undirected edge

View File

@@ -9,7 +9,7 @@ import (
"fmt"
"io"
"log"
"sort"
"slices"
"strings"
"gonum.org/v1/gonum/graph/simple"
@@ -185,7 +185,7 @@ func ExampleTarjanSCC_twoSAT() {
for v, t := range state {
ps = append(ps, fmt.Sprintf("%s:%t", v, t))
}
sort.Strings(ps)
slices.Sort(ps)
fmt.Printf("system %d is satisfiable: %s\n", i, strings.Join(ps, " "))
}

View File

@@ -13,13 +13,13 @@ import (
)
// ByID sorts a slice of graph.Node by ID.
func ByID(n []graph.Node) {
func ByID[S ~[]E, E graph.Node](n S) {
sort.Slice(n, func(i, j int) bool { return n[i].ID() < n[j].ID() })
}
// BySliceValues sorts a slice of []cmp.Ordered lexically by the values of
// the []cmp.Ordered.
func BySliceValues[S interface{ ~[]E }, E cmp.Ordered](c []S) {
func BySliceValues[S ~[]E, E cmp.Ordered](c []S) {
slices.SortFunc(c, func(a, b S) int {
l := min(len(a), len(b))
for k, v := range a[:l] {
@@ -34,35 +34,27 @@ func BySliceValues[S interface{ ~[]E }, E cmp.Ordered](c []S) {
// BySliceIDs sorts a slice of []graph.Node lexically by the IDs of the
// []graph.Node.
func BySliceIDs(c [][]graph.Node) {
	sort.Slice(c, func(i, j int) bool {
		a, b := c[i], c[j]
		l := len(a)
		if len(b) < l {
			l = len(b)
		}
		for k, v := range a[:l] {
			if v.ID() < b[k].ID() {
				return true
			}
			if v.ID() > b[k].ID() {
				return false
			}
		}
		return len(a) < len(b)
	})
	slices.SortFunc(c, func(a, b []graph.Node) int {
		l := min(len(a), len(b))
		for k, v := range a[:l] {
			if n := cmp.Compare(v.ID(), b[k].ID()); n != 0 {
				return n
			}
		}
		return cmp.Compare(len(a), len(b))
	})
}
// LinesByIDs sort a slice of graph.LinesByIDs lexically by the From IDs,
// then by the To IDs, finally by the Line IDs.
func LinesByIDs(n []graph.Line) {
	sort.Slice(n, func(i, j int) bool {
		a, b := n[i], n[j]
		if a.From().ID() != b.From().ID() {
			return a.From().ID() < b.From().ID()
		}
		if a.To().ID() != b.To().ID() {
			return a.To().ID() < b.To().ID()
		}
		return n[i].ID() < n[j].ID()
	})
	slices.SortFunc(n, func(a, b graph.Line) int {
		if n := cmp.Compare(a.From().ID(), b.From().ID()); n != 0 {
			return n
		}
		if n := cmp.Compare(a.To().ID(), b.To().ID()); n != 0 {
			return n
		}
		return cmp.Compare(a.ID(), b.ID())
	})
}
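
The last file in this commit also generalizes ByID from accepting a []graph.Node to accepting any slice type S with underlying type []E where E satisfies graph.Node, so concrete node slices can be sorted without first being copied into a []graph.Node. The sketch below is not gonum code (internal/order cannot be imported from outside the module); it is a self-contained, assumed example of why the generic signature helps:

package main

import (
	"fmt"
	"sort"
)

// Node mirrors the method set of gonum's graph.Node for this sketch.
type Node interface{ ID() int64 }

// ByID has the generalized signature introduced by this commit: any slice
// whose element type satisfies Node may be passed directly.
func ByID[S ~[]E, E Node](n S) {
	sort.Slice(n, func(i, j int) bool { return n[i].ID() < n[j].ID() })
}

// intNode is a concrete node type, analogous to gonum's simple.Node.
type intNode int64

func (n intNode) ID() int64 { return int64(n) }

func main() {
	nodes := []intNode{3, 1, 2} // a []intNode, not a []Node
	ByID(nodes)                 // accepted without conversion thanks to S ~[]E
	fmt.Println(nodes)
	// Output: [1 2 3]
}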