Commit 4f4a0c6

laojianzi authored and git-hulk committed
Extend the index parameters to support the float type (AfterShip#74)
1 parent 84e693c commit 4f4a0c6

10 files changed: +1185 −3 lines

parser/ast.go

Lines changed: 5 additions & 0 deletions

@@ -1274,6 +1274,11 @@ func (a *TableIndex) String(level int) string {
 	builder.WriteString("INDEX")
 	builder.WriteByte(' ')
 	builder.WriteString(a.Name.String(0))
+	// a.ColumnExpr = *Ident --- e.g. INDEX idx column TYPE ...
+	// a.ColumnExpr = *ParamExprList --- e.g. INDEX idx(column) TYPE ...
+	if _, ok := a.ColumnExpr.(*Ident); ok {
+		builder.WriteByte(' ')
+	}
 	builder.WriteString(a.ColumnExpr.String(level))
 	builder.WriteByte(' ')
 	builder.WriteString("TYPE")

parser/parser_column.go

Lines changed: 1 addition & 1 deletion

@@ -674,7 +674,7 @@ func (p *Parser) parseColumnType(_ Pos) (Expr, error) { // nolint:funlen
 		}
 		// like Datetime('Asia/Dubai')
 		return p.parseColumnTypeWithParams(ident, p.Pos())
-	case p.matchTokenKind(TokenInt):
+	case p.matchTokenKind(TokenInt), p.matchTokenKind(TokenFloat):
 		// fixed size
 		return p.parseColumnTypeWithParams(ident, p.Pos())
 	default:
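
This is the branch that lets a parenthesised type or index-type parameter list start with a float, e.g. bloom_filter(0.01). A test-style sketch under the same assumed NewParser/ParseStmts API (the table and engine here are made up for illustration):

package parser_test

import (
	"testing"

	"github.com/AfterShip/clickhouse-sql-parser/parser"
)

func TestIndexTypeFloatParam(t *testing.T) {
	// Before this change the parameter list only matched TokenInt,
	// so bloom_filter's false-positive rate 0.01 failed to parse.
	sql := "CREATE TABLE t (id UInt64, arr Array(Int64), " +
		"INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3) " +
		"ENGINE = MergeTree ORDER BY id;"
	if _, err := parser.NewParser(sql).ParseStmts(); err != nil {
		t.Fatalf("float index parameter should parse: %v", err)
	}
}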

parser/parser_common.go

Lines changed: 1 addition & 1 deletion

@@ -234,7 +234,7 @@ func (p *Parser) parseString(pos Pos) (*StringLiteral, error) {
 
 func (p *Parser) parseLiteral(pos Pos) (Literal, error) {
 	switch {
-	case p.matchTokenKind(TokenInt):
+	case p.matchTokenKind(TokenInt), p.matchTokenKind(TokenFloat):
 		return p.parseNumber(pos)
 	case p.matchTokenKind(TokenString):
 		return p.parseString(pos)
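
The same one-line change in the generic literal path: wherever parseLiteral is reached, a TokenFloat now routes through parseNumber exactly like a TokenInt. Taken together, the two parser changes let a float survive parse-and-format end to end; a sketch of what the golden files further below verify, with the same assumed public API as above:

package main

import (
	"fmt"

	"github.com/AfterShip/clickhouse-sql-parser/parser"
)

func main() {
	sql := "ALTER TABLE test.events_local ON CLUSTER 'default_cluster' " +
		"ADD INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3;"
	stmts, err := parser.NewParser(sql).ParseStmts()
	if err != nil {
		panic(err) // pre-commit versions rejected the 0.01 here
	}
	// Should print the statement with bloom_filter(0.01) preserved,
	// matching the "-- Format SQL:" golden output further below.
	fmt.Println(stmts[0].String(0))
}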

parser/parser_test.go

Lines changed: 0 additions & 1 deletion

@@ -66,7 +66,6 @@ func TestParser_ParseStatements(t *testing.T) {
 				goldie.WithDiffEngine(goldie.ColoredDiff),
 				goldie.WithFixtureDir(outputDir))
 			g.Assert(t, entry.Name(), outputBytes)
-
 		})
 	}
 }
Lines changed: 4 additions & 0 deletions

@@ -1 +1,5 @@
 ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX my_index(f0) TYPE minmax GRANULARITY 1024;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX content_idx content TYPE tokenbf_v1(30720, 2, 0) GRANULARITY 1;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX output_idx output TYPE ngrambf_v1(3, 10000, 2, 1) GRANULARITY 2;
Lines changed: 18 additions & 0 deletions

@@ -0,0 +1,18 @@
+CREATE TABLE IF NOT EXISTS test_local
+(
+    `id` UInt64 CODEC(Delta, ZSTD(1)),
+    `api_id` UInt64 CODEC(ZSTD(1)),
+    `arr` Array(Int64),
+    `content` String CODEC(ZSTD(1)),
+    `output` String,
+    INDEX id_idx id TYPE minmax GRANULARITY 10,
+    INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2,
+    INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3,
+    INDEX content_idx content TYPE tokenbf_v1(30720, 2, 0) GRANULARITY 1,
+    INDEX output_idx output TYPE ngrambf_v1(3, 10000, 2, 1) GRANULARITY 2
+)
+ENGINE = ReplicatedMergeTree('/root/test_local', '{replica}')
+PARTITION BY toStartOfHour(`timestamp`)
+ORDER BY (toUnixTimestamp64Nano(`timestamp`), `api_id`)
+TTL toStartOfHour(`timestamp`) + INTERVAL 7 DAY,toStartOfHour(`timestamp`) + INTERVAL 2 DAY
+SETTINGS execute_merges_on_single_replica_time_threshold=1200, index_granularity=16384, max_bytes_to_merge_at_max_space_in_pool=64424509440, storage_policy='main', ttl_only_drop_parts=1;
Lines changed: 16 additions & 0 deletions

@@ -1,8 +1,24 @@
 -- Origin SQL:
 ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX my_index(f0) TYPE minmax GRANULARITY 1024;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX content_idx content TYPE tokenbf_v1(30720, 2, 0) GRANULARITY 1;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX output_idx output TYPE ngrambf_v1(3, 10000, 2, 1) GRANULARITY 2;
 
 
 -- Format SQL:
 ALTER TABLE test.events_local
 ON CLUSTER 'default_cluster'
 ADD INDEX my_index(f0) TYPE minmax GRANULARITY 1024;
+ALTER TABLE test.events_local
+ON CLUSTER 'default_cluster'
+ADD INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2;
+ALTER TABLE test.events_local
+ON CLUSTER 'default_cluster'
+ADD INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3;
+ALTER TABLE test.events_local
+ON CLUSTER 'default_cluster'
+ADD INDEX content_idx content TYPE tokenbf_v1(30720,2,0) GRANULARITY 1;
+ALTER TABLE test.events_local
+ON CLUSTER 'default_cluster'
+ADD INDEX output_idx output TYPE ngrambf_v1(3,10000,2,1) GRANULARITY 2;
Lines changed: 40 additions & 0 deletions

@@ -0,0 +1,40 @@
+-- Origin SQL:
+CREATE TABLE IF NOT EXISTS test_local
+(
+    `id` UInt64 CODEC(Delta, ZSTD(1)),
+    `api_id` UInt64 CODEC(ZSTD(1)),
+    `arr` Array(Int64),
+    `content` String CODEC(ZSTD(1)),
+    `output` String,
+    INDEX id_idx id TYPE minmax GRANULARITY 10,
+    INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2,
+    INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3,
+    INDEX content_idx content TYPE tokenbf_v1(30720, 2, 0) GRANULARITY 1,
+    INDEX output_idx output TYPE ngrambf_v1(3, 10000, 2, 1) GRANULARITY 2
+)
+ENGINE = ReplicatedMergeTree('/root/test_local', '{replica}')
+PARTITION BY toStartOfHour(`timestamp`)
+ORDER BY (toUnixTimestamp64Nano(`timestamp`), `api_id`)
+TTL toStartOfHour(`timestamp`) + INTERVAL 7 DAY,toStartOfHour(`timestamp`) + INTERVAL 2 DAY
+SETTINGS execute_merges_on_single_replica_time_threshold=1200, index_granularity=16384, max_bytes_to_merge_at_max_space_in_pool=64424509440, storage_policy='main', ttl_only_drop_parts=1;
+
+
+-- Format SQL:
+CREATE TABLE IF NOT EXISTS test_local
+(
+    `id` UInt64 CODEC(Delta, ZSTD(1)),
+    `api_id` UInt64 CODEC(ZSTD(1)),
+    `arr` Array(Int64),
+    `content` String CODEC(ZSTD(1)),
+    `output` String,
+    INDEX id_idx id TYPE minmax GRANULARITY 10,
+    INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2,
+    INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3,
+    INDEX content_idx content TYPE tokenbf_v1(30720,2,0) GRANULARITY 1,
+    INDEX output_idx output TYPE ngrambf_v1(3,10000,2,1) GRANULARITY 2
+)
+ENGINE = ReplicatedMergeTree('/root/test_local', '{replica}')
+PARTITION BY toStartOfHour(`timestamp`)
+TTL toStartOfHour(`timestamp`) + INTERVAL 7 DAY,toStartOfHour(`timestamp`) + INTERVAL 2 DAY
+SETTINGS execute_merges_on_single_replica_time_threshold=1200, index_granularity=16384, max_bytes_to_merge_at_max_space_in_pool=64424509440, storage_policy='main', ttl_only_drop_parts=1
+ORDER BY (toUnixTimestamp64Nano(`timestamp`), `api_id`);
