@housekit/orm 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +224 -0
- package/dist/builders/delete.d.ts +21 -0
- package/dist/builders/insert.d.ts +128 -0
- package/dist/builders/prepared.d.ts +11 -0
- package/dist/builders/select.d.ts +352 -0
- package/dist/builders/select.types.d.ts +76 -0
- package/dist/builders/update.d.ts +23 -0
- package/dist/client.d.ts +52 -0
- package/dist/codegen/zod.d.ts +4 -0
- package/dist/column.d.ts +76 -0
- package/dist/compiler.d.ts +27 -0
- package/dist/core.d.ts +6 -0
- package/dist/data-types.d.ts +150 -0
- package/dist/dictionary.d.ts +263 -0
- package/dist/engines.d.ts +558 -0
- package/dist/expressions.d.ts +72 -0
- package/dist/external.d.ts +177 -0
- package/dist/index.d.ts +187 -0
- package/dist/index.js +222 -0
- package/dist/logger.d.ts +8 -0
- package/dist/materialized-views.d.ts +271 -0
- package/dist/metadata.d.ts +33 -0
- package/dist/modules/aggregates.d.ts +205 -0
- package/dist/modules/array.d.ts +122 -0
- package/dist/modules/conditional.d.ts +110 -0
- package/dist/modules/conversion.d.ts +189 -0
- package/dist/modules/geo.d.ts +202 -0
- package/dist/modules/hash.d.ts +7 -0
- package/dist/modules/index.d.ts +12 -0
- package/dist/modules/json.d.ts +130 -0
- package/dist/modules/math.d.ts +28 -0
- package/dist/modules/string.d.ts +167 -0
- package/dist/modules/time.d.ts +154 -0
- package/dist/modules/types.d.ts +177 -0
- package/dist/modules/window.d.ts +27 -0
- package/dist/relational.d.ts +33 -0
- package/dist/relations.d.ts +15 -0
- package/dist/schema-builder.d.ts +172 -0
- package/dist/table.d.ts +172 -0
- package/dist/utils/background-batcher.d.ts +20 -0
- package/dist/utils/batch-transform.d.ts +20 -0
- package/dist/utils/binary-reader.d.ts +48 -0
- package/dist/utils/binary-serializer.d.ts +160 -0
- package/dist/utils/binary-worker-code.d.ts +1 -0
- package/dist/utils/binary-worker-pool.d.ts +76 -0
- package/dist/utils/binary-worker.d.ts +12 -0
- package/dist/utils/insert-processing.d.ts +23 -0
- package/dist/utils/lru-cache.d.ts +10 -0
- package/package.json +68 -0
|
@@ -0,0 +1,558 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* HouseKit Engine DSL - First-class ClickHouse Engine Support
|
|
3
|
+
*
|
|
4
|
+
* This module provides type-safe engine configurations for ClickHouse tables.
|
|
5
|
+
* Unlike generic ORMs which often treat engines as raw strings, HouseKit validates engine
|
|
6
|
+
* parameters at compile-time and provides intelligent defaults.
|
|
7
|
+
*/
|
|
8
|
+
import type { TableDefinition, TableColumns } from './table';
|
|
9
|
+
/**
 * Base configuration shared by all MergeTree-family engines.
 */
export interface MergeTreeBaseConfig {
    /**
     * Experimental: Enable lightweight DELETE/UPDATE operations.
     * Requires ClickHouse 23.3+.
     */
    enableLightweightDeletes?: boolean;
}
/**
 * Configuration for the basic MergeTree engine.
 */
export interface MergeTreeConfig extends MergeTreeBaseConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'MergeTree';
}
|
|
25
|
+
/**
 * Configuration for the ReplacingMergeTree engine.
 * Used for deduplication scenarios where the last version of a row should be kept.
 */
export interface ReplacingMergeTreeConfig extends MergeTreeBaseConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'ReplacingMergeTree';
    /** Column used to determine which row version to keep (newer wins). */
    versionColumn?: string;
    /**
     * Column indicating if a row is deleted (ClickHouse 23.2+).
     * When 1, the row is considered deleted during FINAL merges.
     */
    isDeletedColumn?: string;
}
/**
 * Configuration for the SummingMergeTree engine.
 * Automatically sums numeric columns during merges.
 */
export interface SummingMergeTreeConfig extends MergeTreeBaseConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'SummingMergeTree';
    /** Columns to sum. If empty, sums all numeric columns not in ORDER BY. */
    columns?: string[];
}
|
|
48
|
+
/**
 * Configuration for the AggregatingMergeTree engine.
 * Used with AggregateFunction columns for pre-aggregated materialized views.
 */
export interface AggregatingMergeTreeConfig extends MergeTreeBaseConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'AggregatingMergeTree';
}
/**
 * Configuration for the CollapsingMergeTree engine.
 * Uses a sign column to collapse pairs of rows with opposite signs.
 */
export interface CollapsingMergeTreeConfig extends MergeTreeBaseConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'CollapsingMergeTree';
    /** Column containing 1 or -1 to indicate row state. */
    signColumn: string;
}
/**
 * Configuration for the VersionedCollapsingMergeTree engine.
 * Like CollapsingMergeTree but with a version column for more robust deduplication.
 */
export interface VersionedCollapsingMergeTreeConfig extends MergeTreeBaseConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'VersionedCollapsingMergeTree';
    /** Column containing 1 or -1 to indicate row state. */
    signColumn: string;
    /** Version column for ordering rows. */
    versionColumn: string;
}
|
|
75
|
+
/**
 * Configuration for the GraphiteMergeTree engine.
 * Optimized for storing Graphite metrics data.
 */
export interface GraphiteMergeTreeConfig extends MergeTreeBaseConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'GraphiteMergeTree';
    /** Name of the Graphite rollup configuration (a section in the server config). */
    configSection: string;
}
/**
 * Configuration for the ReplicatedMergeTree engine.
 * Provides data replication across ClickHouse cluster nodes.
 */
export interface ReplicatedMergeTreeConfig extends MergeTreeBaseConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'ReplicatedMergeTree';
    /**
     * Path in ZooKeeper for this table's replication.
     * Supports macros: {shard}, {replica}, {database}, {table}
     * @default '/clickhouse/tables/{shard}/{database}/{table}'
     */
    zkPath?: string;
    /**
     * Unique replica identifier.
     * Supports macros: {replica}, {hostname}
     * @default '{replica}'
     */
    replicaName?: string;
    /** Base engine type to replicate (defaults to MergeTree). */
    baseEngine?: 'MergeTree' | 'ReplacingMergeTree' | 'SummingMergeTree' | 'AggregatingMergeTree' | 'CollapsingMergeTree' | 'VersionedCollapsingMergeTree';
    /** Version column — used when baseEngine is 'ReplacingMergeTree'. */
    versionColumn?: string;
    /** Is-deleted column — used when baseEngine is 'ReplacingMergeTree'. */
    isDeletedColumn?: string;
    /** Columns to sum — used when baseEngine is 'SummingMergeTree'. */
    sumColumns?: string[];
    /** Sign column — used when baseEngine is a Collapsing variant. */
    signColumn?: string;
}
|
|
112
|
+
/**
 * Configuration for the Buffer engine.
 * Buffers writes in memory before flushing to a target table.
 * Excellent for high-throughput insert scenarios.
 *
 * A flush is triggered when EITHER all min* thresholds are met OR any
 * max* threshold is exceeded (standard ClickHouse Buffer semantics —
 * NOTE(review): confirm against the renderer in renderEngineSQL).
 */
export interface BufferConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'Buffer';
    /** Database of the target table (use 'currentDatabase()' for same database). */
    database: string;
    /** Name of the target table. */
    table: string;
    /** Number of buffer layers (typically 16). */
    layers: number;
    /** Minimum time (seconds) before flush. */
    minTime: number;
    /** Maximum time (seconds) before flush. */
    maxTime: number;
    /** Minimum rows before flush. */
    minRows: number;
    /** Maximum rows before flush. */
    maxRows: number;
    /** Minimum bytes before flush. */
    minBytes: number;
    /** Maximum bytes before flush. */
    maxBytes: number;
}
/**
 * Configuration for the Distributed engine.
 * Routes queries across a cluster of ClickHouse nodes.
 */
export interface DistributedConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'Distributed';
    /** Cluster name as defined in ClickHouse configuration. */
    cluster: string;
    /** Database name on remote servers. */
    database: string;
    /** Table name on remote servers. */
    table: string;
    /**
     * Expression to determine which shard receives each row.
     * @default 'rand()'
     */
    shardingKey?: string;
    /** Policy name for selecting replicas. */
    policyName?: string;
}
|
|
158
|
+
/**
 * Configuration for the Null engine.
 * Data is discarded (useful for testing or as a data sink for materialized views).
 */
export interface NullConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'Null';
}
/**
 * Configuration for the Log engine.
 * Simple append-only storage, no indices.
 */
export interface LogConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'Log';
}
/**
 * Configuration for the TinyLog engine.
 * Like Log but stores each column in a separate file.
 */
export interface TinyLogConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'TinyLog';
}
/**
 * Configuration for the Memory engine.
 * Stores data in RAM; contents are lost on server restart.
 */
export interface MemoryConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'Memory';
    /** Maximum number of rows to store. */
    maxRows?: number;
    /** Maximum bytes to store. */
    maxBytes?: number;
    /** Compress data in memory. */
    compress?: boolean;
}
/**
 * Configuration for the Join engine.
 * Stores data prepared for use on the right side of JOIN operations.
 */
export interface JoinConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'Join';
    /** Join strictness: any match or all matches. */
    strictness: 'Any' | 'All' | 'Semi' | 'Anti';
    /** Join type. */
    joinType: 'Inner' | 'Left' | 'Right' | 'Full' | 'Cross';
    /** Key columns for the join. */
    keys: string[];
}
|
|
205
|
+
/**
 * Configuration for the Dictionary engine.
 * Wraps a ClickHouse dictionary as a table.
 */
export interface DictionaryConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'Dictionary';
    /** Name of the dictionary. */
    dictionaryName: string;
}
/**
 * Configuration for the File engine.
 * Reads/writes data from/to a file in a specified format.
 */
export interface FileConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'File';
    /** Data format (e.g., 'TabSeparated', 'CSV', 'JSONEachRow'). */
    format: string;
    /** Optional: compression type. */
    compression?: 'none' | 'gzip' | 'lz4' | 'zstd';
}
/**
 * Configuration for the URL engine.
 * Reads data from a remote URL.
 */
export interface URLConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'URL';
    /** URL to read from. */
    url: string;
    /** Data format. */
    format: string;
    /** Optional: compression type. */
    compression?: 'none' | 'gzip' | 'lz4' | 'zstd';
}
/**
 * Configuration for the S3 engine.
 * Reads/writes data to Amazon S3 (or S3-compatible storage).
 */
export interface S3Config {
    /** Discriminant tag identifying this configuration variant. */
    type: 'S3';
    /** S3 URL pattern. */
    path: string;
    /** Data format. */
    format: string;
    /** Optional: AWS access key ID. */
    accessKeyId?: string;
    /** Optional: AWS secret access key. */
    secretAccessKey?: string;
    /** Optional: compression type. */
    compression?: 'none' | 'gzip' | 'lz4' | 'zstd';
}
/**
 * Configuration for the Kafka engine.
 * Consumes messages from Apache Kafka.
 */
export interface KafkaConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'Kafka';
    /** Kafka broker list (comma-separated host:port pairs). */
    brokerList: string;
    /** Topic name(s). */
    topicList: string | string[];
    /** Consumer group ID. */
    groupName: string;
    /** Data format for messages. */
    format: string;
    /** Number of polling threads. */
    numConsumers?: number;
    /** Max rows per poll. */
    maxBlockSize?: number;
    /** Number of broken messages to tolerate (skip) per block. */
    skipBroken?: number;
}
|
|
276
|
+
/**
 * Configuration for the PostgreSQL engine.
 * Reads data from a PostgreSQL database.
 */
export interface PostgreSQLConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'PostgreSQL';
    /** PostgreSQL host. */
    host: string;
    /** PostgreSQL port. */
    port: number;
    /** Database name. */
    database: string;
    /** Table name. */
    table: string;
    /** Username. */
    user: string;
    /** Password. */
    password: string;
    /** Schema name. */
    schema?: string;
}
/**
 * Configuration for the MySQL engine.
 * Reads data from a MySQL database.
 */
export interface MySQLConfig {
    /** Discriminant tag identifying this configuration variant. */
    type: 'MySQL';
    /** MySQL host. */
    host: string;
    /** MySQL port. */
    port: number;
    /** Database name. */
    database: string;
    /** Table name. */
    table: string;
    /** Username. */
    user: string;
    /** Password. */
    password: string;
}
|
|
316
|
+
/**
 * Union type of all supported engine configurations.
 * Discriminated on the `type` property, so narrowing via
 * `engine.type === '...'` yields the concrete config shape.
 */
export type EngineConfiguration = MergeTreeConfig | ReplacingMergeTreeConfig | SummingMergeTreeConfig | AggregatingMergeTreeConfig | CollapsingMergeTreeConfig | VersionedCollapsingMergeTreeConfig | GraphiteMergeTreeConfig | ReplicatedMergeTreeConfig | BufferConfig | DistributedConfig | NullConfig | LogConfig | TinyLogConfig | MemoryConfig | JoinConfig | DictionaryConfig | FileConfig | URLConfig | S3Config | KafkaConfig | PostgreSQLConfig | MySQLConfig;
|
|
320
|
+
/**
 * Factory functions for creating type-safe engine configurations.
 *
 * @example
 * ```typescript
 * import { table, t, Engine } from '@housekit/orm';
 *
 * const events = table('events', (t) => ({
 *   id: text('id'),
 *   timestamp: timestamp('timestamp'),
 * }), {
 *   engine: Engine.ReplicatedMergeTree(),
 *   orderBy: 'timestamp',
 * });
 * ```
 */
export declare const Engine: {
    /**
     * The most versatile and powerful ClickHouse engine.
     * Designed for high-volume data insertion.
     * @see https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree
     */
    MergeTree: (options?: Omit<MergeTreeConfig, "type">) => MergeTreeConfig;
    /**
     * Removes duplicates with the same sorting key during merges.
     * @param versionColumn - Column (UInt* or DateTime) to determine which row is the latest.
     * @param isDeletedColumn - Column marking a row as deleted (ClickHouse 23.2+).
     */
    ReplacingMergeTree: (versionColumn?: string, isDeletedColumn?: string, options?: Omit<ReplacingMergeTreeConfig, "type" | "versionColumn" | "isDeletedColumn">) => ReplacingMergeTreeConfig;
    /**
     * SummingMergeTree - automatic summation of numeric columns during merges.
     *
     * @example
     * ```typescript
     * engine: Engine.SummingMergeTree(['amount', 'count'])
     * ```
     */
    SummingMergeTree: (columns?: string[], options?: Omit<SummingMergeTreeConfig, "type" | "columns">) => SummingMergeTreeConfig;
    /**
     * AggregatingMergeTree - for use with AggregateFunction columns.
     */
    AggregatingMergeTree: (options?: Omit<AggregatingMergeTreeConfig, "type">) => AggregatingMergeTreeConfig;
    /**
     * CollapsingMergeTree - uses a sign column to collapse row pairs.
     *
     * @example
     * ```typescript
     * engine: Engine.CollapsingMergeTree('sign')
     * ```
     */
    CollapsingMergeTree: (signColumn: string, options?: Omit<CollapsingMergeTreeConfig, "type" | "signColumn">) => CollapsingMergeTreeConfig;
    /**
     * VersionedCollapsingMergeTree - CollapsingMergeTree with version support.
     *
     * @example
     * ```typescript
     * engine: Engine.VersionedCollapsingMergeTree('sign', 'version')
     * ```
     */
    VersionedCollapsingMergeTree: (signColumn: string, versionColumn: string, options?: Omit<VersionedCollapsingMergeTreeConfig, "type" | "signColumn" | "versionColumn">) => VersionedCollapsingMergeTreeConfig;
    /**
     * GraphiteMergeTree - optimized for Graphite metrics.
     *
     * @example
     * ```typescript
     * engine: Engine.GraphiteMergeTree('graphite_rollup')
     * ```
     */
    GraphiteMergeTree: (configSection: string, options?: Omit<GraphiteMergeTreeConfig, "type" | "configSection">) => GraphiteMergeTreeConfig;
    /**
     * ReplicatedMergeTree - data replication across cluster nodes.
     *
     * HouseKit provides sensible defaults using ClickHouse macros that work
     * out-of-the-box in most cluster configurations.
     *
     * @example
     * ```typescript
     * // Basic usage with defaults
     * engine: Engine.ReplicatedMergeTree()
     *
     * // Custom ZK path
     * engine: Engine.ReplicatedMergeTree({
     *   zkPath: '/clickhouse/prod/tables/{shard}/events',
     *   replicaName: '{replica}'
     * })
     *
     * // Replicated ReplacingMergeTree
     * engine: Engine.ReplicatedMergeTree({
     *   baseEngine: 'ReplacingMergeTree',
     *   versionColumn: 'updated_at'
     * })
     * ```
     */
    ReplicatedMergeTree: (config?: Omit<ReplicatedMergeTreeConfig, "type">) => ReplicatedMergeTreeConfig;
    /**
     * Buffer engine - buffers inserts before flushing to a target table.
     *
     * Excellent for high-throughput scenarios where you want to reduce
     * the number of parts created by batching inserts.
     *
     * @example
     * ```typescript
     * // Create buffer with target table reference
     * const eventsBuffer = table('events_buffer', events.$columns, {
     *   engine: Engine.Buffer(events, { minRows: 1000, maxRows: 10000 })
     * });
     * ```
     */
    Buffer: <T extends TableColumns>(targetTable: TableDefinition<T>, opts: {
        minRows: number;
        maxRows: number;
        layers?: number;
        minTime?: number;
        maxTime?: number;
        minBytes?: number;
        maxBytes?: number;
    }) => BufferConfig;
    /**
     * Buffer engine with explicit database and table names
     * (for when the target table is not available as a TableDefinition).
     */
    BufferExplicit: (config: Omit<BufferConfig, "type">) => BufferConfig;
    /**
     * Distributed engine - distributes queries across cluster shards.
     *
     * @example
     * ```typescript
     * const eventsDistributed = table('events_distributed', events.$columns, {
     *   engine: Engine.Distributed({
     *     cluster: 'my_cluster',
     *     database: 'default',
     *     table: 'events_local',
     *     shardingKey: 'user_id'
     *   })
     * });
     * ```
     */
    Distributed: (config: Omit<DistributedConfig, "type">) => DistributedConfig;
    /**
     * Null engine - discards all data (useful for testing).
     */
    Null: () => NullConfig;
    /**
     * Log engine - simple append-only storage.
     */
    Log: () => LogConfig;
    /**
     * TinyLog engine - lightweight logging.
     */
    TinyLog: () => TinyLogConfig;
    /**
     * Memory engine - stores all data in RAM.
     *
     * @example
     * ```typescript
     * engine: Engine.Memory({ maxRows: 100000 })
     * ```
     */
    Memory: (config?: Omit<MemoryConfig, "type">) => MemoryConfig;
    /**
     * Join engine - optimized for JOIN operations.
     *
     * @example
     * ```typescript
     * engine: Engine.Join('Any', 'Left', ['user_id'])
     * ```
     */
    Join: (strictness: JoinConfig["strictness"], joinType: JoinConfig["joinType"], keys: string[]) => JoinConfig;
    /**
     * Dictionary engine - wraps a dictionary as a table.
     */
    Dictionary: (dictionaryName: string) => DictionaryConfig;
    /**
     * File engine - read/write from files.
     *
     * @example
     * ```typescript
     * engine: Engine.File('CSV')
     * engine: Engine.File('JSONEachRow', 'gzip')
     * ```
     */
    File: (format: string, compression?: FileConfig["compression"]) => FileConfig;
    /**
     * URL engine - read from remote URLs.
     */
    URL: (url: string, format: string, compression?: URLConfig["compression"]) => URLConfig;
    /**
     * S3 engine - read/write to Amazon S3.
     *
     * @example
     * ```typescript
     * engine: Engine.S3({
     *   path: 's3://bucket/path/data.parquet',
     *   format: 'Parquet'
     * })
     * ```
     */
    S3: (config: Omit<S3Config, "type">) => S3Config;
    /**
     * Kafka engine - consume from Kafka topics.
     *
     * @example
     * ```typescript
     * engine: Engine.Kafka({
     *   brokerList: 'kafka:9092',
     *   topicList: 'events',
     *   groupName: 'clickhouse_consumer',
     *   format: 'JSONEachRow'
     * })
     * ```
     */
    Kafka: (config: Omit<KafkaConfig, "type">) => KafkaConfig;
    /**
     * PostgreSQL engine - read from PostgreSQL.
     */
    PostgreSQL: (config: Omit<PostgreSQLConfig, "type">) => PostgreSQLConfig;
    /**
     * MySQL engine - read from MySQL.
     */
    MySQL: (config: Omit<MySQLConfig, "type">) => MySQLConfig;
};
|
|
539
|
+
/**
 * Renders an EngineConfiguration to its SQL representation.
 * This is used internally by defineTable() but can be useful for debugging.
 *
 * @param engine - The engine configuration, or undefined
 * @returns SQL string for the ENGINE clause
 */
export declare function renderEngineSQL(engine: EngineConfiguration | undefined): string;
/**
 * Checks if an engine configuration is from the MergeTree family.
 *
 * @param engine - Engine config object, raw engine string, or undefined
 */
export declare function isMergeTreeFamily(engine: string | EngineConfiguration | undefined): boolean;
/**
 * Checks if an engine configuration is replicated.
 *
 * @param engine - Engine config object, raw engine string, or undefined
 */
export declare function isReplicatedEngine(engine: string | EngineConfiguration | undefined): boolean;
/**
 * Extracts the version column from an engine configuration if applicable
 * (ReplacingMergeTree / VersionedCollapsingMergeTree variants).
 *
 * @param engine - Engine config object, raw engine string, or undefined
 * @returns The version column name, or undefined when not applicable
 */
export declare function getVersionColumn(engine: string | EngineConfiguration | undefined): string | undefined;
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import { ClickHouseColumn } from './core';
|
|
2
|
+
/** Anything that can appear inside a SQL expression: a sub-expression, a column reference, or a plain value. */
export type SQLWrapper = SQLExpression | ClickHouseColumn | SQLValue;
/** Plain JavaScript values accepted as SQL parameters. */
export type SQLValue = string | number | boolean | Date | null | string[] | number[];
/** Options controlling how an expression is rendered to SQL text. */
export interface ToSQLOptions {
    /** When true, column references are rendered without their table prefix. */
    ignoreTablePrefix?: boolean;
    /** Table context used to resolve column references. */
    table?: {
        $columns: Record<string, any>;
    };
}
|
|
10
|
+
/** A SQL expression that has been given an alias via `.as(alias)`; the alias is tracked at the type level. */
export type AliasedExpression<TResult = any, TAlias extends string = string> = SQLExpression<TResult> & {
    _alias: TAlias;
};
/**
 * Core interface for composable SQL expressions.
 * @typeParam TResult - The TypeScript type the expression evaluates to.
 */
export interface SQLExpression<TResult = any> {
    /** Phantom property carrying the result type; not populated at runtime. */
    _type: TResult;
    /** Renders the expression to SQL text plus its named parameters. */
    toSQL(options?: ToSQLOptions): {
        sql: string;
        params: Record<string, unknown>;
    };
    /** Attaches an alias, preserving the result type and recording the alias at the type level. */
    as<TAlias extends string>(alias: TAlias): AliasedExpression<TResult, TAlias>;
    /**
     * Visits each constituent of the expression.
     * NOTE(review): returns string — presumably the rendered SQL produced
     * while walking; confirm against the implementation in dist/index.js.
     */
    walk(visitor: (value: any, type: string) => void): string;
}
|
|
22
|
+
/**
 * Concrete SQL expression built from template-literal chunks and parameters.
 * Produced by the `sql` tagged template; implements SQLExpression.
 * @typeParam TResult - The TypeScript type the expression evaluates to.
 */
export declare class SQL<TResult = any> implements SQLExpression<TResult> {
    /** Literal text chunks between interpolated values. */
    readonly queryChunks: string[];
    /** Interpolated values, positionally aligned with queryChunks. */
    readonly params: any[];
    /** Phantom property carrying the result type; not populated at runtime. */
    readonly _type: TResult;
    constructor(queryChunks: string[], params: any[]);
    /** Attaches an alias, preserving the result type at the type level. */
    as<TAlias extends string>(alias: TAlias): AliasedExpression<TResult, TAlias>;
    /** Visits each constituent of the expression; see SQLExpression.walk. */
    walk(visitor: (value: any, type: string) => void): string;
    private formatColumn;
    /** Renders the expression to SQL text plus its named parameters. */
    toSQL(options?: ToSQLOptions): {
        sql: string;
        params: Record<string, unknown>;
    };
}
/**
 * Tagged template for building parameterized SQL expressions.
 * Interpolated values become bound parameters (columns/expressions are inlined).
 * @example sql`count() > ${10}`
 */
export declare function sql<T = any>(strings: TemplateStringsArray, ...args: any[]): SQLExpression<T>;
export declare namespace sql {
    /** Embeds a raw SQL string verbatim — caller is responsible for escaping. */
    var raw: (rawSql: string) => SQLExpression;
    /** Joins expressions/columns/values with a separator (comma by default — TODO confirm default). */
    var join: (expressions: (SQLExpression | ClickHouseColumn | SQLValue)[], separator?: SQLExpression | string) => SQLExpression;
}
|
|
40
|
+
/**
 * Typed SQL helper that preserves types for columns from a table definition.
 * @typeParam T - Map of column names to ClickHouseColumn for the source table.
 */
export declare function typedSQL<T extends Record<string, ClickHouseColumn>>(strings: TemplateStringsArray, ...args: Array<SQLValue | SQLExpression | T[keyof T]>): SQL<any>;
/**
 * Generic function call helper for any ClickHouse function.
 * @param name Function name
 * @param args Function arguments
 * @example fn('hex', md5(users.email))
 * @example fn('length', users.interests)
 */
export declare function fn(name: string, ...args: (ClickHouseColumn | SQLExpression | SQLValue)[]): SQLExpression;
|
|
52
|
+
/** Equality comparison: `col = val`. */
export declare function eq<T>(col: ClickHouseColumn<T, any, any> | SQLExpression, val: T | SQLValue | ClickHouseColumn | SQLExpression): SQLExpression<any>;
/** Inequality comparison: `col != val`. */
export declare function ne<T>(col: ClickHouseColumn<T, any, any> | SQLExpression, val: T | SQLValue | ClickHouseColumn | SQLExpression): SQLExpression<any>;
/** Greater-than comparison: `col > val`. */
export declare function gt<T>(col: ClickHouseColumn<T, any, any> | SQLExpression, val: T | SQLValue | ClickHouseColumn | SQLExpression): SQLExpression<any>;
/** Greater-than-or-equal comparison: `col >= val`. */
export declare function gte<T>(col: ClickHouseColumn<T, any, any> | SQLExpression, val: T | SQLValue | ClickHouseColumn | SQLExpression): SQLExpression<any>;
/** Less-than comparison: `col < val`. */
export declare function lt<T>(col: ClickHouseColumn<T, any, any> | SQLExpression, val: T | SQLValue | ClickHouseColumn | SQLExpression): SQLExpression<any>;
/** Less-than-or-equal comparison: `col <= val`. */
export declare function lte<T>(col: ClickHouseColumn<T, any, any> | SQLExpression, val: T | SQLValue | ClickHouseColumn | SQLExpression): SQLExpression<any>;
/** Membership test: `col IN (values)`. */
export declare function inArray(col: ClickHouseColumn | SQLExpression, values: SQLValue[] | SQLExpression): SQLExpression<any>;
/** Negated membership test: `col NOT IN (values)`. */
export declare function notInArray(col: ClickHouseColumn | SQLExpression, values: SQLValue[] | SQLExpression): SQLExpression<any>;
/** Range test: `col BETWEEN min AND max`. */
export declare function between(col: ClickHouseColumn | SQLExpression, min: SQLValue, max: SQLValue): SQLExpression<any>;
/** Negated range test: `col NOT BETWEEN min AND max`. */
export declare function notBetween(col: ClickHouseColumn | SQLExpression, min: SQLValue, max: SQLValue): SQLExpression<any>;
/** Array containment: ClickHouse `has(col, value)`. */
export declare function has(col: ClickHouseColumn | SQLExpression, value: SQLValue): SQLExpression<any>;
/** Array containment of every value: ClickHouse `hasAll(col, values)`. */
export declare function hasAll(col: ClickHouseColumn | SQLExpression, values: SQLValue[]): SQLExpression<any>;
/** Array containment of at least one value: ClickHouse `hasAny(col, values)`. */
export declare function hasAny(col: ClickHouseColumn | SQLExpression, values: SQLValue[]): SQLExpression<any>;
/** Ascending sort specifier for ORDER BY clauses. */
export declare function asc(col: ClickHouseColumn | SQLExpression): {
    col: ClickHouseColumn<any, true, false> | SQLExpression<any>;
    dir: "ASC";
};
/** Descending sort specifier for ORDER BY clauses. */
export declare function desc(col: ClickHouseColumn | SQLExpression): {
    col: ClickHouseColumn<any, true, false> | SQLExpression<any>;
    dir: "DESC";
};
|