Mirror of https://github.com/chaisql/chai.git (synced 2025-11-01 11:22:42 +08:00)
@@ -77,7 +77,7 @@ func (c *Catalog) GetTable(tx *Transaction, tableName string) (*Table, error) {
 	return &Table{
 		Tx: tx,
-		Tree: tree.New(tx.Session, ti.StoreNamespace, 0),
+		Tree: tree.New(tx.Session, ti.StoreNamespace, ti.PrimaryKeySortOrder()),
 		Info: ti,
 	}, nil
 }

@@ -99,7 +99,7 @@ func (c *Catalog) GetIndex(tx *Transaction, indexName string) (*Index, error) {
 		return nil, err
 	}

-	return NewIndex(tree.New(tx.Session, info.StoreNamespace, 0), *info), nil
+	return NewIndex(tree.New(tx.Session, info.StoreNamespace, info.KeySortOrder), *info), nil
 }

 // GetIndexInfo returns an index info by name.

@@ -297,7 +297,7 @@ func (c *CatalogWriter) DropTable(tx *Transaction, tableName string) error {
 		return err
 	}

-	return tree.New(tx.Session, ti.StoreNamespace, 0).Truncate()
+	return tree.New(tx.Session, ti.StoreNamespace, ti.PrimaryKeySortOrder()).Truncate()
 }

 // CreateIndex creates an index with the given name.

@@ -358,7 +358,7 @@ func (c *CatalogWriter) DropIndex(tx *Transaction, name string) error {
 }

 func (c *CatalogWriter) dropIndex(tx *Transaction, info *IndexInfo) error {
-	err := tree.New(tx.Session, info.StoreNamespace, 0).Truncate()
+	err := tree.New(tx.Session, info.StoreNamespace, info.KeySortOrder).Truncate()
 	if err != nil {
 		return err
 	}

@@ -866,7 +866,7 @@ func (s *CatalogStore) Info() *TableInfo {
 func (s *CatalogStore) Table(tx *Transaction) *Table {
 	return &Table{
 		Tx: tx,
-		Tree: tree.New(tx.Session, CatalogTableNamespace, 0),
+		Tree: tree.New(tx.Session, CatalogTableNamespace, s.info.PrimaryKeySortOrder()),
 		Info: s.info,
 	}
 }

@@ -232,6 +232,7 @@ type TableConstraint struct {
 	Check      TableExpression
 	Unique     bool
 	PrimaryKey bool
+	SortOrder  tree.SortOrder
 }

 func (t *TableConstraint) String() string {
@@ -247,11 +248,29 @@ func (t *TableConstraint) String() string {
 		sb.WriteString(")")
 	case t.PrimaryKey:
 		sb.WriteString(" PRIMARY KEY (")
-		sb.WriteString(t.Paths.String())
+		for i, pt := range t.Paths {
+			if i > 0 {
+				sb.WriteString(", ")
+			}
+			sb.WriteString(pt.String())
+
+			if t.SortOrder.IsDesc(i) {
+				sb.WriteString(" DESC")
+			}
+		}
 		sb.WriteString(")")
 	case t.Unique:
 		sb.WriteString(" UNIQUE (")
-		sb.WriteString(t.Paths.String())
+		for i, pt := range t.Paths {
+			if i > 0 {
+				sb.WriteString(", ")
+			}
+			sb.WriteString(pt.String())
+
+			if t.SortOrder.IsDesc(i) {
+				sb.WriteString(" DESC")
+			}
+		}
 		sb.WriteString(")")
 	}

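For illustration only, not part of this commit: the two loops above render each key path and append " DESC" only for the positions flagged in SortOrder. A minimal, self-contained sketch of that rendering rule, using plain strings instead of document.Path and a hypothetical descAt callback standing in for SortOrder.IsDesc:

package main

import (
	"fmt"
	"strings"
)

// renderKeyList mimics the loops above: one path per position,
// with " DESC" appended when that position is flagged descending.
func renderKeyList(paths []string, descAt func(int) bool) string {
	var sb strings.Builder
	sb.WriteString("(")
	for i, p := range paths {
		if i > 0 {
			sb.WriteString(", ")
		}
		sb.WriteString(p)
		if descAt(i) {
			sb.WriteString(" DESC")
		}
	}
	sb.WriteString(")")
	return sb.String()
}

func main() {
	desc := map[int]bool{0: true} // only the first key part is descending
	fmt.Println("PRIMARY KEY " + renderKeyList([]string{"a", "b", "c.d"}, func(i int) bool { return desc[i] }))
	// Output: PRIMARY KEY (a DESC, b, c.d)
}

This matches the catalog output asserted in the SQL tests later in this diff ("CONSTRAINT test_pk PRIMARY KEY (a DESC, b, c.d)").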
@@ -216,7 +216,7 @@ func BenchmarkCompositeIndexSet(b *testing.B) {
 		b.StartTimer()
 		for j := 0; j < size; j++ {
 			k := fmt.Sprintf("name-%d", j)
-			idx.Set(values(types.NewTextValue(k), types.NewTextValue(k)), []byte(k))
+			_ = idx.Set(values(types.NewTextValue(k), types.NewTextValue(k)), []byte(k))
 		}
 		b.StopTimer()
 		cleanup()

@@ -28,30 +28,6 @@ type TableInfo struct {
 	TableConstraints TableConstraints
 }

-func NewTableInfo(tableName string, fcs []*FieldConstraint, tcs []*TableConstraint) (*TableInfo, error) {
-	ti := TableInfo{
-		TableName: tableName,
-	}
-
-	// add field constraints first, in the order they were defined
-	for _, fc := range fcs {
-		err := ti.AddFieldConstraint(fc)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	// add table constraints
-	for _, tc := range tcs {
-		err := ti.AddTableConstraint(tc)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return &ti, nil
-}
-
 func (ti *TableInfo) AddFieldConstraint(newFc *FieldConstraint) error {
 	if ti.FieldConstraints.ByField == nil {
 		ti.FieldConstraints.ByField = make(map[string]*FieldConstraint)

@@ -151,6 +127,7 @@ func (ti *TableInfo) GetPrimaryKey() *PrimaryKey {
 		}

 		pk.Paths = tc.Paths
+		pk.SortOrder = tc.SortOrder

 		for _, pp := range tc.Paths {
 			fc := ti.GetFieldConstraintForPath(pp)
@@ -167,6 +144,15 @@ func (ti *TableInfo) GetPrimaryKey() *PrimaryKey {
 	return nil
 }

+func (ti *TableInfo) PrimaryKeySortOrder() tree.SortOrder {
+	pk := ti.GetPrimaryKey()
+	if pk == nil {
+		return 0
+	}
+
+	return pk.SortOrder
+}
+
 func (ti *TableInfo) GetFieldConstraintForPath(p document.Path) *FieldConstraint {
 	return ti.FieldConstraints.GetFieldConstraintForPath(p)
 }
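For illustration only, not part of this commit: PrimaryKeySortOrder returns the zero tree.SortOrder when there is no primary key, i.e. every key part is treated as ascending. The diff only shows that tree.SortOrder exposes SetDesc(i) and IsDesc(i); the sketch below is a hypothetical bit-flag implementation (not the actual tree package code) just to show how a per-position descending flag with an all-ascending zero value can behave:

package main

import "fmt"

// SortOrder is a hypothetical stand-in for tree.SortOrder: bit i set means
// "key part i is descending". The zero value means everything ascending.
type SortOrder uint64

func (o SortOrder) SetDesc(i int) SortOrder { return o | 1<<uint(i) }
func (o SortOrder) IsDesc(i int) bool       { return o&(1<<uint(i)) != 0 }

func main() {
	var o SortOrder  // no primary key, or PRIMARY KEY (a, b): all ascending
	o = o.SetDesc(0) // PRIMARY KEY (a DESC, b)
	fmt.Println(o.IsDesc(0), o.IsDesc(1)) // true false
}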
@@ -232,6 +218,7 @@ func (ti *TableInfo) Clone() *TableInfo {
 type PrimaryKey struct {
 	Paths document.Paths
 	Types []types.ValueType
+	SortOrder tree.SortOrder
 }

 // IndexInfo holds the configuration of an index.

@@ -241,6 +228,9 @@ type IndexInfo struct {
 	IndexName string
 	Paths []document.Path

+	// Sort order of each indexed field.
+	KeySortOrder tree.SortOrder
+
 	// If set to true, values will be associated with at most one key. False by default.
 	Unique bool

@@ -251,23 +241,27 @@ type IndexInfo struct {
 }

 // String returns a SQL representation.
-func (i *IndexInfo) String() string {
+func (idx *IndexInfo) String() string {
 	var s strings.Builder

 	s.WriteString("CREATE ")
-	if i.Unique {
+	if idx.Unique {
 		s.WriteString("UNIQUE ")
 	}

-	fmt.Fprintf(&s, "INDEX %s ON %s (", stringutil.NormalizeIdentifier(i.IndexName, '`'), stringutil.NormalizeIdentifier(i.Owner.TableName, '`'))
+	fmt.Fprintf(&s, "INDEX %s ON %s (", stringutil.NormalizeIdentifier(idx.IndexName, '`'), stringutil.NormalizeIdentifier(idx.Owner.TableName, '`'))

-	for i, p := range i.Paths {
+	for i, p := range idx.Paths {
 		if i > 0 {
 			s.WriteString(", ")
 		}

 		// Path
 		s.WriteString(p.String())
+
+		if idx.KeySortOrder.IsDesc(i) {
+			s.WriteString(" DESC")
+		}
 	}

 	s.WriteString(")")

@@ -319,7 +319,7 @@ func BenchmarkTableInsert(b *testing.B) {

 		b.StartTimer()
 		for j := 0; j < size; j++ {
-			tb.Insert(&fb)
+			_, _, _ = tb.Insert(&fb)
 		}
 		b.StopTimer()
 		cleanup()

@@ -348,7 +348,7 @@ func BenchmarkTableScan(b *testing.B) {

 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		tb.IterateOnRange(nil, false, func(*tree.Key, types.Document) error {
+		_ = tb.IterateOnRange(nil, false, func(*tree.Key, types.Document) error {
 			return nil
 		})
 	}

@@ -7,11 +7,6 @@ import (

 var _ Session = (*BatchSession)(nil)

-const (
-	// 10MB
-	defaultMaxBatchSize = 10 * 1024 * 1024
-)
-
 var (
 	tombStone = []byte{0}
 )

@@ -8,6 +8,11 @@ import (
 	"github.com/genjidb/genji/lib/atomic"
 )

+const (
+	defaultMaxBatchSize = 10 * 1024 * 1024 // 10MB
+	defaultMaxTransientBatchSize int = 1 << 19 // 512KB
+)
+
 type Store struct {
 	db *pebble.DB
 	opts Options

@@ -5,10 +5,6 @@ import (
 	"github.com/cockroachdb/pebble"
 )

-const (
-	defaultMaxTransientBatchSize int = 1 << 19 // 512KB
-)
-
 var _ Session = (*TransientSession)(nil)

 type TransientSession struct {

@@ -8,6 +8,7 @@ import (
 	"github.com/genjidb/genji/internal/stream/docs"
 	"github.com/genjidb/genji/internal/stream/index"
 	"github.com/genjidb/genji/internal/stream/table"
+	"github.com/genjidb/genji/internal/tree"
 )

 // SelectIndex attempts to replace a sequential scan by an index scan or a pk scan by

@@ -17,9 +18,13 @@ import (
 // Compatibility of filter nodes.
 //
 // For a filter node to be selected it must be of the following form:
 //
 //	<path> <compatible operator> <expression>
 //
 // or
 //
 //	<expression> <compatible operator> <path>
 //
 // path: path of a document
 // compatible operator: one of =, >, >=, <, <=, IN
 // expression: any expression
@@ -29,33 +34,45 @@ import (
 // Once we have a list of all compatible filter nodes, we try to associate
 // indexes with them.
 // Given the following index:
 //
 //	CREATE INDEX foo_a_idx ON foo (a)
 //
 // and this query:
 //
 //	SELECT * FROM foo WHERE a > 5 AND b > 10
 //	table.Scan('foo') | docs.Filter(a > 5) | docs.Filter(b > 10) | docs.Project(*)
 //
 // foo_a_idx matches docs.Filter(a > 5) and can be selected.
 // Now, with a different index:
 //
 //	CREATE INDEX foo_a_b_c_idx ON foo(a, b, c)
 //
 // and this query:
 //
 //	SELECT * FROM foo WHERE a > 5 AND c > 20
 //	table.Scan('foo') | docs.Filter(a > 5) | docs.Filter(c > 20) | docs.Project(*)
 //
 // foo_a_b_c_idx matches with the first filter because a is the leftmost path indexed by it.
 // The second filter is not selected because it is not the second leftmost path.
 // For composite indexes, filter nodes can be selected if they match with one or more indexed path
 // consecutively, from left to right.
 // Now, let's have a look at this query:
 //
 //	SELECT * FROM foo WHERE a = 5 AND b = 10 AND c > 15 AND d > 20
 //	table.Scan('foo') | docs.Filter(a = 5) | docs.Filter(b = 10) | docs.Filter(c > 15) | docs.Filter(d > 20) | docs.Project(*)
 //
 // foo_a_b_c_idx matches with the first three filters because they satisfy several conditions:
 // - each of them matches with the first 3 indexed paths, consecutively.
 // - the first 2 filters use the equal operator
 // A counter-example:
 //
 //	SELECT * FROM foo WHERE a = 5 AND b > 10 AND c > 15 AND d > 20
 //	table.Scan('foo') | docs.Filter(a = 5) | docs.Filter(b > 10) | docs.Filter(c > 15) | docs.Filter(d > 20) | docs.Project(*)
 //
 // foo_a_b_c_idx only matches with the first two filter nodes because while the first node uses the equal
 // operator, the second one doesn't, and thus the third node cannot be selected as well.
 //
-// Candidates and cost
+// # Candidates and cost
 //
 // Because a table can have multiple indexes, we need to establish which of these
 // indexes should be used to run the query, if not all of them.
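For illustration only, not part of this commit: the matching rule described in the comment above (a filter chain can use a composite index only as a consecutive left-to-right prefix, and a non-equality comparison ends the match) restated as a small self-contained Go sketch; usablePrefix and exactByPath are hypothetical names, not planner code:

package main

import "fmt"

// usablePrefix reports how many leading paths of a composite index can be
// consumed by the given filters: matching stops at the first indexed path with
// no filter, and a range comparison (>, >=, <, <=) must be the last one matched.
func usablePrefix(indexedPaths []string, exactByPath map[string]bool) int {
	n := 0
	for _, p := range indexedPaths {
		exact, ok := exactByPath[p]
		if !ok {
			break // gap: filters must match consecutively from the left
		}
		n++
		if !exact {
			break // a range filter can only be the last matched path
		}
	}
	return n
}

func main() {
	idx := []string{"a", "b", "c"} // CREATE INDEX foo_a_b_c_idx ON foo(a, b, c)

	// WHERE a = 5 AND b = 10 AND c > 15 AND d > 20 -> first three paths usable
	fmt.Println(usablePrefix(idx, map[string]bool{"a": true, "b": true, "c": false, "d": false}))

	// WHERE a = 5 AND b > 10 AND c > 15 AND d > 20 -> only the first two paths usable
	fmt.Println(usablePrefix(idx, map[string]bool{"a": true, "b": false, "c": false, "d": false}))
}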
@@ -141,7 +158,7 @@ func (i *indexSelector) selectIndex() error {
 	}
 	pk := tb.GetPrimaryKey()
 	if pk != nil {
-		selected = i.associateIndexWithNodes(tb.TableName, false, false, pk.Paths, nodes)
+		selected = i.associateIndexWithNodes(tb.TableName, false, false, pk.Paths, pk.SortOrder, nodes)
 		if selected != nil {
 			cost = selected.Cost()
 		}

@@ -155,7 +172,7 @@ func (i *indexSelector) selectIndex() error {
 			return err
 		}

-		candidate := i.associateIndexWithNodes(idxInfo.IndexName, true, idxInfo.Unique, idxInfo.Paths, nodes)
+		candidate := i.associateIndexWithNodes(idxInfo.IndexName, true, idxInfo.Unique, idxInfo.Paths, idxInfo.KeySortOrder, nodes)

 		if candidate == nil {
 			continue

@@ -258,13 +275,14 @@ func (i *indexSelector) isTempTreeSortIndexable(n *docs.TempTreeSortOperator) *i
 // - transform all associated nodes into an index range
 // If not all indexed paths have an associated filter node, return whatever has been associated.
 // A few examples for this index: CREATE INDEX ON foo(a, b, c)
 //
 // filter(a = 3) | docs.Filter(b = 10) | (c > 20)
 // -> range = {min: [3, 10, 20]}
 // filter(a = 3) | docs.Filter(b > 10) | (c > 20)
 // -> range = {min: [3], exact: true}
 // docs.Filter(a IN (1, 2))
 // -> ranges = [1], [2]
-func (i *indexSelector) associateIndexWithNodes(treeName string, isIndex bool, isUnique bool, paths []document.Path, nodes indexableNodes) *candidate {
+func (i *indexSelector) associateIndexWithNodes(treeName string, isIndex bool, isUnique bool, paths []document.Path, sortOrder tree.SortOrder, nodes indexableNodes) *candidate {
 	found := make([]*indexableNode, 0, len(paths))
 	var desc bool

@@ -333,6 +351,11 @@ func (i *indexSelector) associateIndexWithNodes(treeName string, isIndex bool, i
 		isUnique: isUnique,
 	}

+	// in case the primary key or index is descending, we need to reverse the order
+	if sortOrder.IsDesc(0) {
+		desc = !desc
+	}
+
 	if !isIndex {
 		if !desc {
 			c.replaceRootBy = []stream.Operator{

@@ -381,6 +404,13 @@ func (i *indexSelector) associateIndexWithNodes(treeName string, isIndex bool, i
 		isUnique: isUnique,
 	}

+	// in case the indexed path is descending, we need to reverse the order
+	if found[len(found)-1].orderBy != nil {
+		if sortOrder.IsDesc(len(found) - 1) {
+			desc = !desc
+		}
+	}
+
 	if !isIndex {
 		if !desc {
 			c.replaceRootBy = []stream.Operator{
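For illustration only, not part of this commit: the two added blocks above implement a double negation. "desc" starts as the scan direction implied by the query, and each key part that is stored descending flips it. A self-contained sketch of that rule (useReverseScan is a hypothetical helper, not planner code):

package main

import "fmt"

// useReverseScan decides whether the tree must be walked backwards:
// an ORDER BY ... DESC asks for a reversed walk, but if the key itself is
// stored descending, the two reversals cancel out.
func useReverseScan(orderByDesc, keyStoredDesc bool) bool {
	desc := orderByDesc
	if keyStoredDesc {
		desc = !desc
	}
	return desc
}

func main() {
	// index on (a DESC): ORDER BY a DESC is a plain forward scan,
	// while ORDER BY a (ascending) becomes a reverse scan.
	fmt.Println(useReverseScan(true, true))  // false -> index.Scan
	fmt.Println(useReverseScan(false, true)) // true  -> index.ScanReverse
}

This matches the EXPLAIN expectations in the new SQL tests below, where ORDER BY a on an index declared (a DESC, b DESC) produces index.ScanReverse and ORDER BY a DESC produces a plain index.Scan.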
@@ -533,7 +563,7 @@ func (i *indexSelector) buildRangeFromOperator(lastOp scanner.Token, paths []doc
 // It can be used to filter the results of a query or
 // to order the results.
 type indexableNode struct {
-	// associated stream node (either a DocsFilterNode or a DocsTempTreeSortNote)
+	// associated stream node (either a DocsFilterNode or a DocsTempTreeSortNode)
 	node stream.Operator

 	// For filter nodes

@@ -62,6 +62,7 @@ func (stmt *CreateTableStmt) Run(ctx *Context) (Result, error) {
 				TableName: stmt.Info.TableName,
 				Paths: tc.Paths,
 			},
+			KeySortOrder: tc.SortOrder,
 		})
 		if err != nil {
 			return res, err

@@ -9,6 +9,7 @@ import (
 	"github.com/genjidb/genji/internal/expr"
 	"github.com/genjidb/genji/internal/query/statement"
 	"github.com/genjidb/genji/internal/sql/scanner"
+	"github.com/genjidb/genji/internal/tree"
 	"github.com/genjidb/genji/types"
 )

@@ -198,10 +199,27 @@ LOOP:
 				return nil, nil, err
 			}

-			tcs = append(tcs, &database.TableConstraint{
+			tc := database.TableConstraint{
 				PrimaryKey: true,
 				Paths: document.Paths{path},
-			})
+			}
+
+			// if ASC is set, we ignore it, otherwise we check for DESC
+			ok, err := p.parseOptional(scanner.ASC)
+			if err != nil {
+				return nil, nil, err
+			}
+			if !ok {
+				ok, err = p.parseOptional(scanner.DESC)
+				if err != nil {
+					return nil, nil, err
+				}
+				if ok {
+					tc.SortOrder = tree.SortOrder(0).SetDesc(0)
+				}
+			}
+
+			tcs = append(tcs, &tc)
 		case scanner.NOT:
 			// Parse "NULL"
 			if err := p.parseTokens(scanner.NULL); err != nil {

@@ -340,6 +358,7 @@ func (p *Parser) parseTableConstraint(stmt *statement.CreateTableStmt) (*databas

 	var tc database.TableConstraint
 	var requiresTc bool
+	var order tree.SortOrder

 	if ok, _ := p.parseOptional(scanner.CONSTRAINT); ok {
 		tok, pos, lit := p.ScanIgnoreWhitespace()

@@ -364,7 +383,7 @@ func (p *Parser) parseTableConstraint(stmt *statement.CreateTableStmt) (*databas

 		tc.PrimaryKey = true

-		tc.Paths, err = p.parsePathList()
+		tc.Paths, order, err = p.parsePathList()
 		if err != nil {
 			return nil, err
 		}

@@ -372,9 +391,10 @@ func (p *Parser) parseTableConstraint(stmt *statement.CreateTableStmt) (*databas
 			tok, pos, lit := p.ScanIgnoreWhitespace()
 			return nil, newParseError(scanner.Tokstr(tok, lit), []string{"PATHS"}, pos)
 		}
+		tc.SortOrder = order
 	case scanner.UNIQUE:
 		tc.Unique = true
-		tc.Paths, err = p.parsePathList()
+		tc.Paths, order, err = p.parsePathList()
 		if err != nil {
 			return nil, err
 		}

@@ -382,6 +402,7 @@ func (p *Parser) parseTableConstraint(stmt *statement.CreateTableStmt) (*databas
 			tok, pos, lit := p.ScanIgnoreWhitespace()
 			return nil, newParseError(scanner.Tokstr(tok, lit), []string{"PATHS"}, pos)
 		}
+		tc.SortOrder = order
 	case scanner.CHECK:
 		e, paths, err := p.parseCheckConstraint()
 		if err != nil {

@@ -437,7 +458,7 @@ func (p *Parser) parseCreateIndexStatement(unique bool) (*statement.CreateIndexS
 		return nil, err
 	}

-	paths, err := p.parsePathList()
+	paths, order, err := p.parsePathList()
 	if err != nil {
 		return nil, err
 	}

@@ -447,6 +468,7 @@ func (p *Parser) parseCreateIndexStatement(unique bool) (*statement.CreateIndexS
 	}

 	stmt.Info.Paths = paths
+	stmt.Info.KeySortOrder = order

 	return &stmt, nil
 }

@@ -12,6 +12,7 @@ import (
 	"github.com/genjidb/genji/internal/query"
 	"github.com/genjidb/genji/internal/query/statement"
 	"github.com/genjidb/genji/internal/sql/scanner"
+	"github.com/genjidb/genji/internal/tree"
 )

 // Parser represents a Genji SQL Parser.

@@ -158,23 +159,41 @@ func (p *Parser) parseCondition() (expr.Expr, error) {
 }

 // parsePathList parses a list of paths in the form: (path, path, ...), if it exists
-func (p *Parser) parsePathList() ([]document.Path, error) {
+func (p *Parser) parsePathList() ([]document.Path, tree.SortOrder, error) {
 	// Parse ( token.
 	if ok, err := p.parseOptional(scanner.LPAREN); !ok || err != nil {
-		return nil, err
+		return nil, 0, err
 	}

 	var paths []document.Path
 	var err error
 	var path document.Path
+	var order tree.SortOrder

 	// Parse first (required) path.
 	if path, err = p.parsePath(); err != nil {
-		return nil, err
+		return nil, 0, err
 	}

 	paths = append(paths, path)

+	// Parse optional ASC/DESC token.
+	ok, err := p.parseOptional(scanner.DESC)
+	if err != nil {
+		return nil, 0, err
+	}
+	if ok {
+		order = order.SetDesc(0)
+	} else {
+		// ignore ASC if set
+		_, err := p.parseOptional(scanner.ASC)
+		if err != nil {
+			return nil, 0, err
+		}
+	}
+
 	// Parse remaining (optional) paths.
+	i := 0
 	for {
 		if tok, _, _ := p.ScanIgnoreWhitespace(); tok != scanner.COMMA {
 			p.Unscan()

@@ -183,18 +202,35 @@ func (p *Parser) parsePathList() ([]document.Path, error) {

 		vp, err := p.parsePath()
 		if err != nil {
-			return nil, err
+			return nil, 0, err
 		}

 		paths = append(paths, vp)

+		i++
+
+		// Parse optional ASC/DESC token.
+		ok, err := p.parseOptional(scanner.DESC)
+		if err != nil {
+			return nil, 0, err
+		}
+		if ok {
+			order = order.SetDesc(i)
+		} else {
+			// ignore ASC if set
+			_, err := p.parseOptional(scanner.ASC)
+			if err != nil {
+				return nil, 0, err
+			}
+		}
 	}

 	// Parse required ) token.
 	if err := p.parseTokens(scanner.RPAREN); err != nil {
-		return nil, err
+		return nil, 0, err
 	}

-	return paths, nil
+	return paths, order, nil
 }

 // Scan returns the next token from the underlying scanner.

@@ -18,6 +18,26 @@ SELECT name, sql FROM __genji_catalog WHERE type = "table" AND name = "test";
 }
 */

+-- test: with ASC order
+CREATE TABLE test(a INT PRIMARY KEY ASC);
+SELECT name, sql FROM __genji_catalog WHERE type = "table" AND name = "test";
+/* result:
+{
+  "name": "test",
+  "sql": "CREATE TABLE test (a INTEGER NOT NULL, CONSTRAINT test_pk PRIMARY KEY (a))"
+}
+*/
+
+-- test: with DESC order
+CREATE TABLE test(a INT PRIMARY KEY DESC);
+SELECT name, sql FROM __genji_catalog WHERE type = "table" AND name = "test";
+/* result:
+{
+  "name": "test",
+  "sql": "CREATE TABLE test (a INTEGER NOT NULL, CONSTRAINT test_pk PRIMARY KEY (a DESC))"
+}
+*/
+
 -- test: twice
 CREATE TABLE test(a INT PRIMARY KEY PRIMARY KEY);
 -- error:

@@ -56,6 +76,17 @@ SELECT name, sql FROM __genji_catalog WHERE type = "table" AND name = "test";
 }
 */

+-- test: table constraint: multiple fields: with order
+CREATE TABLE test(a INT, b INT, c (d INT), PRIMARY KEY(a DESC, b, c.d ASC));
+SELECT name, sql FROM __genji_catalog WHERE type = "table" AND name = "test";
+/* result:
+{
+  "name": "test",
+  "sql": "CREATE TABLE test (a INTEGER NOT NULL, b INTEGER NOT NULL, c (d INTEGER NOT NULL), CONSTRAINT test_pk PRIMARY KEY (a DESC, b, c.d))"
+}
+*/
+
 -- test: table constraint: undeclared fields
 CREATE TABLE test(a INT, b INT, PRIMARY KEY(a, b, c));
 -- error:

@@ -97,6 +97,25 @@ WHERE
 }
 */

+-- test: table constraint: multiple fields with order
+CREATE TABLE test(a INT, b INT, c INT, UNIQUE(a DESC, b ASC, c));
+SELECT name, sql
+FROM __genji_catalog
+WHERE
+  (type = "table" AND name = "test")
+  OR
+  (type = "index" AND name = "test_a_b_c_idx");
+/* result:
+{
+  "name": "test",
+  "sql": "CREATE TABLE test (a INTEGER, b INTEGER, c INTEGER, CONSTRAINT test_a_b_c_unique UNIQUE (a DESC, b, c))"
+}
+{
+  "name": "test_a_b_c_idx",
+  "sql": "CREATE UNIQUE INDEX test_a_b_c_idx ON test (a DESC, b, c)"
+}
+*/
+
 -- test: table constraint: undeclared field
 CREATE TABLE test(a INT, UNIQUE(b));
 -- error:

sqltests/SELECT/order_by_desc_index.sql (new file, 154 lines)
@@ -0,0 +1,154 @@
-- setup:
CREATE TABLE test(a INT, b DOUBLE);
CREATE INDEX on test(a DESC, b DESC);
INSERT INTO test (a, b) VALUES (50, 2), (100, 3), (10, 1), (100, 4);

-- test: asc
SELECT a, b FROM test ORDER BY a;
/* result:
{
  a: 10,
  b: 1.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 100,
  b: 4.0
}
*/

-- test: asc / explain
EXPLAIN SELECT a FROM test ORDER BY a;
/* result:
{
  plan: "index.ScanReverse(\"test_a_b_idx\") | docs.Project(a)"
}
*/

-- test: asc / wildcard
SELECT * FROM test ORDER BY a;
/* result:
{
  a: 10,
  b: 1.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 100,
  b: 4.0
}
*/

-- test: desc / no index
SELECT a, b FROM test ORDER BY b DESC;
/* result:
{
  a: 100,
  b: 4.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 10,
  b: 1.0
}
*/

-- test: desc / no index: explain
EXPLAIN SELECT a, b FROM test ORDER BY b DESC;
/* result:
{
  plan: "table.Scan(\"test\") | docs.Project(a, b) | docs.TempTreeSortReverse(b)"
}
*/

-- test: desc / with index
SELECT a, b FROM test ORDER BY a DESC;
/* result:
{
  a: 100,
  b: 4.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 10,
  b: 1.0
}
*/

-- test: desc / with index: explain
EXPLAIN SELECT a, b FROM test ORDER BY a DESC;
/* result:
{
  plan: "index.Scan(\"test_a_b_idx\") | docs.Project(a, b)"
}
*/

-- test: desc / with index / multi field
SELECT a, b FROM test WHERE a = 100 ORDER BY b DESC;
/* result:
{
  a: 100,
  b: 4.0
}
{
  a: 100,
  b: 3.0
}
*/

-- test: explain desc / with index / multi field
EXPLAIN SELECT a, b FROM test WHERE a = 100 ORDER BY b DESC;
/* result:
{
  plan: "index.Scan(\"test_a_b_idx\", [{\"min\": [100], \"exact\": true}]) | docs.Project(a, b)"
}
*/

-- test: desc / wildcard
SELECT * FROM test ORDER BY a DESC;
/* result:
{
  a: 100,
  b: 4.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 10,
  b: 1.0
}
*/
sqltests/SELECT/order_by_desc_pk.sql (new file, 83 lines)
@@ -0,0 +1,83 @@
-- setup:
CREATE TABLE test(a INT PRIMARY KEY DESC, b double);
INSERT INTO test (a, b) VALUES (50, 2), (100, 3), (10, 1);

-- test: asc
SELECT b FROM test ORDER BY a;
/* result:
{
  b: 1.0,
}
{
  b: 2.0
}
{
  b: 3.0
}
*/

-- test: asc / wildcard
SELECT * FROM test ORDER BY a;
/* result:
{
  a: 10,
  b: 1.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 100,
  b: 3.0
}
*/

-- test: desc
SELECT b FROM test ORDER BY a DESC;
/* result:
{
  b: 3.0
}
{
  b: 2.0
}
{
  b: 1.0
}
*/

-- test: explain desc
EXPLAIN SELECT b FROM test ORDER BY a DESC;
/* result:
{
  plan: "table.Scan(\"test\") | docs.Project(b)"
}
*/

-- test: desc / wildcard
SELECT * FROM test ORDER BY a DESC;
/* result:
{
  a: 100,
  b: 3.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 10,
  b: 1.0
}
*/

-- test: explain desc / wildcard
EXPLAIN SELECT * FROM test ORDER BY a DESC;
/* result:
{
  plan: "table.Scan(\"test\")"
}
*/
sqltests/SELECT/order_by_desc_pk_composite.sql (new file, 153 lines)
@@ -0,0 +1,153 @@
-- setup:
CREATE TABLE test(a INT, b DOUBLE, PRIMARY KEY (a DESC, b DESC));
INSERT INTO test (a, b) VALUES (50, 2), (100, 3), (10, 1), (100, 4);

-- test: asc
SELECT a, b FROM test ORDER BY a;
/* result:
{
  a: 10,
  b: 1.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 100,
  b: 4.0
}
*/

-- test: asc / explain
EXPLAIN SELECT a FROM test ORDER BY a;
/* result:
{
  plan: "table.ScanReverse(\"test\") | docs.Project(a)"
}
*/

-- test: asc / wildcard
SELECT * FROM test ORDER BY a;
/* result:
{
  a: 10,
  b: 1.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 100,
  b: 4.0
}
*/

-- test: desc / no index
SELECT a, b FROM test ORDER BY b DESC;
/* result:
{
  a: 100,
  b: 4.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 10,
  b: 1.0
}
*/

-- test: desc / no index: explain
EXPLAIN SELECT a, b FROM test ORDER BY b DESC;
/* result:
{
  plan: "table.Scan(\"test\") | docs.Project(a, b) | docs.TempTreeSortReverse(b)"
}
*/

-- test: desc / with index
SELECT a, b FROM test ORDER BY a DESC;
/* result:
{
  a: 100,
  b: 4.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 10,
  b: 1.0
}
*/

-- test: desc / with index: explain
EXPLAIN SELECT a, b FROM test ORDER BY a DESC;
/* result:
{
  plan: "table.Scan(\"test\") | docs.Project(a, b)"
}
*/

-- test: desc / with index / multi field
SELECT a, b FROM test WHERE a = 100 ORDER BY b DESC;
/* result:
{
  a: 100,
  b: 4.0
}
{
  a: 100,
  b: 3.0
}
*/

-- test: explain desc / with index / multi field
EXPLAIN SELECT a, b FROM test WHERE a = 100 ORDER BY b DESC;
/* result:
{
  plan: "table.Scan(\"test\", [{\"min\": [100], \"exact\": true}]) | docs.Project(a, b)"
}
*/

-- test: desc / wildcard
SELECT * FROM test ORDER BY a DESC;
/* result:
{
  a: 100,
  b: 4.0
}
{
  a: 100,
  b: 3.0
}
{
  a: 50,
  b: 2.0
}
{
  a: 10,
  b: 1.0
}
*/