@stonyx/orm 0.2.5-alpha.0 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +482 -15
- package/config/environment.js +63 -6
- package/dist/aggregates.d.ts +21 -0
- package/dist/aggregates.js +93 -0
- package/dist/attr.d.ts +2 -0
- package/dist/attr.js +22 -0
- package/dist/belongs-to.d.ts +11 -0
- package/dist/belongs-to.js +59 -0
- package/dist/cli.d.ts +22 -0
- package/dist/cli.js +148 -0
- package/dist/commands.d.ts +7 -0
- package/dist/commands.js +146 -0
- package/dist/db.d.ts +21 -0
- package/dist/db.js +180 -0
- package/dist/exports/db.d.ts +7 -0
- package/{src → dist}/exports/db.js +2 -4
- package/dist/has-many.d.ts +11 -0
- package/dist/has-many.js +58 -0
- package/dist/hooks.d.ts +75 -0
- package/dist/hooks.js +110 -0
- package/dist/index.d.ts +14 -0
- package/dist/index.js +34 -0
- package/dist/main.d.ts +46 -0
- package/dist/main.js +181 -0
- package/dist/manage-record.d.ts +13 -0
- package/dist/manage-record.js +123 -0
- package/dist/meta-request.d.ts +6 -0
- package/dist/meta-request.js +52 -0
- package/dist/migrate.d.ts +2 -0
- package/dist/migrate.js +57 -0
- package/dist/model-property.d.ts +9 -0
- package/dist/model-property.js +29 -0
- package/dist/model.d.ts +15 -0
- package/dist/model.js +18 -0
- package/dist/mysql/connection.d.ts +14 -0
- package/dist/mysql/connection.js +24 -0
- package/dist/mysql/migration-generator.d.ts +45 -0
- package/dist/mysql/migration-generator.js +254 -0
- package/dist/mysql/migration-runner.d.ts +12 -0
- package/dist/mysql/migration-runner.js +88 -0
- package/dist/mysql/mysql-db.d.ts +100 -0
- package/dist/mysql/mysql-db.js +425 -0
- package/dist/mysql/query-builder.d.ts +10 -0
- package/dist/mysql/query-builder.js +44 -0
- package/dist/mysql/schema-introspector.d.ts +19 -0
- package/dist/mysql/schema-introspector.js +257 -0
- package/dist/mysql/type-map.d.ts +21 -0
- package/dist/mysql/type-map.js +36 -0
- package/dist/orm-request.d.ts +38 -0
- package/dist/orm-request.js +475 -0
- package/dist/plural-registry.d.ts +4 -0
- package/dist/plural-registry.js +9 -0
- package/dist/postgres/connection.d.ts +15 -0
- package/dist/postgres/connection.js +32 -0
- package/dist/postgres/migration-generator.d.ts +45 -0
- package/dist/postgres/migration-generator.js +280 -0
- package/dist/postgres/migration-runner.d.ts +10 -0
- package/dist/postgres/migration-runner.js +87 -0
- package/dist/postgres/postgres-db.d.ts +119 -0
- package/dist/postgres/postgres-db.js +477 -0
- package/dist/postgres/query-builder.d.ts +27 -0
- package/dist/postgres/query-builder.js +98 -0
- package/dist/postgres/schema-introspector.d.ts +29 -0
- package/dist/postgres/schema-introspector.js +296 -0
- package/dist/postgres/type-map.d.ts +23 -0
- package/dist/postgres/type-map.js +56 -0
- package/dist/record.d.ts +75 -0
- package/dist/record.js +129 -0
- package/dist/relationships.d.ts +10 -0
- package/dist/relationships.js +41 -0
- package/dist/schema-helpers.d.ts +20 -0
- package/dist/schema-helpers.js +48 -0
- package/dist/serializer.d.ts +17 -0
- package/dist/serializer.js +136 -0
- package/dist/setup-rest-server.d.ts +1 -0
- package/dist/setup-rest-server.js +52 -0
- package/dist/standalone-db.d.ts +58 -0
- package/dist/standalone-db.js +142 -0
- package/dist/store.d.ts +62 -0
- package/dist/store.js +286 -0
- package/dist/timescale/query-builder.d.ts +43 -0
- package/dist/timescale/query-builder.js +115 -0
- package/dist/timescale/timescale-db.d.ts +45 -0
- package/dist/timescale/timescale-db.js +84 -0
- package/dist/transforms.d.ts +2 -0
- package/dist/transforms.js +17 -0
- package/dist/types/orm-types.d.ts +153 -0
- package/dist/types/orm-types.js +1 -0
- package/dist/utils.d.ts +7 -0
- package/dist/utils.js +17 -0
- package/dist/view-resolver.d.ts +8 -0
- package/dist/view-resolver.js +171 -0
- package/dist/view.d.ts +11 -0
- package/dist/view.js +18 -0
- package/package.json +64 -11
- package/src/aggregates.ts +109 -0
- package/src/{attr.js → attr.ts} +2 -2
- package/src/belongs-to.ts +90 -0
- package/src/cli.ts +183 -0
- package/src/commands.ts +179 -0
- package/src/db.ts +232 -0
- package/src/exports/db.ts +7 -0
- package/src/has-many.ts +92 -0
- package/src/hooks.ts +151 -0
- package/src/{index.js → index.ts} +12 -2
- package/src/main.ts +229 -0
- package/src/manage-record.ts +161 -0
- package/src/{meta-request.js → meta-request.ts} +17 -14
- package/src/migrate.ts +72 -0
- package/src/model-property.ts +35 -0
- package/src/model.ts +21 -0
- package/src/mysql/connection.ts +43 -0
- package/src/mysql/migration-generator.ts +337 -0
- package/src/mysql/migration-runner.ts +121 -0
- package/src/mysql/mysql-db.ts +543 -0
- package/src/mysql/query-builder.ts +69 -0
- package/src/mysql/schema-introspector.ts +310 -0
- package/src/mysql/type-map.ts +42 -0
- package/src/orm-request.ts +582 -0
- package/src/plural-registry.ts +12 -0
- package/src/postgres/connection.ts +48 -0
- package/src/postgres/migration-generator.ts +370 -0
- package/src/postgres/migration-runner.ts +115 -0
- package/src/postgres/postgres-db.ts +616 -0
- package/src/postgres/query-builder.ts +148 -0
- package/src/postgres/schema-introspector.ts +360 -0
- package/src/postgres/type-map.ts +61 -0
- package/src/record.ts +186 -0
- package/src/relationships.ts +54 -0
- package/src/schema-helpers.ts +59 -0
- package/src/serializer.ts +161 -0
- package/src/setup-rest-server.ts +62 -0
- package/src/standalone-db.ts +185 -0
- package/src/store.ts +373 -0
- package/src/timescale/query-builder.ts +174 -0
- package/src/timescale/timescale-db.ts +119 -0
- package/src/transforms.ts +20 -0
- package/src/types/mysql2.d.ts +49 -0
- package/src/types/orm-types.ts +158 -0
- package/src/types/pg.d.ts +32 -0
- package/src/types/stonyx-cron.d.ts +5 -0
- package/src/types/stonyx-events.d.ts +4 -0
- package/src/types/stonyx-rest-server.d.ts +16 -0
- package/src/types/stonyx-utils.d.ts +33 -0
- package/src/types/stonyx.d.ts +21 -0
- package/src/utils.ts +22 -0
- package/src/view-resolver.ts +211 -0
- package/src/view.ts +22 -0
- package/.claude/project-structure.md +0 -578
- package/.github/workflows/ci.yml +0 -36
- package/.github/workflows/publish.yml +0 -143
- package/src/belongs-to.js +0 -63
- package/src/db.js +0 -80
- package/src/has-many.js +0 -61
- package/src/main.js +0 -119
- package/src/manage-record.js +0 -103
- package/src/model-property.js +0 -29
- package/src/model.js +0 -9
- package/src/orm-request.js +0 -249
- package/src/record.js +0 -100
- package/src/relationships.js +0 -43
- package/src/serializer.js +0 -138
- package/src/setup-rest-server.js +0 -57
- package/src/store.js +0 -211
- package/src/transforms.js +0 -20
- package/stonyx-bootstrap.cjs +0 -30
package/dist/store.js
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
1
|
+
import Orm, { relationships } from '@stonyx/orm';
|
|
2
|
+
import { TYPES, getHasManyRegistry, getBelongsToRegistry, getPendingRegistry } from './relationships.js';
|
|
3
|
+
import ViewResolver from './view-resolver.js';
|
|
4
|
+
/**
 * Type guard: true when `value` is a store-managed record, i.e. a non-null
 * object carrying the internal `__data` payload property.
 */
function isStoreRecord(value) {
    if (value === null || typeof value !== 'object')
        return false;
    return '__data' in value;
}
|
|
7
|
+
/**
 * Singleton identity-map cache for ORM records, with optional SQL
 * pass-through for models configured as memory: false.
 */
export default class Store {
    /** Singleton slot — the first constructed Store is reused forever. */
    static instance;
    /** In-memory cache: modelName -> Map<recordId, record>. */
    data = new Map();
    /**
     * Set by Orm during init — resolves memory flag for a model name.
     */
    _memoryResolver = null;
    /**
     * Set by Orm during init — reference to the SQL adapter instance for on-demand queries.
     */
    _sqlDb = null;
    constructor() {
        // Singleton: returning the existing instance makes `new Store()` idempotent
        if (Store.instance)
            return Store.instance;
        Store.instance = this;
        this.data = new Map();
    }
    /**
     * Synchronous read. With no id, returns the model's whole Map (or undefined);
     * with an id, returns the single record.
     * Uses a nullish check (not truthiness) so falsy-but-valid ids such as 0
     * or '' address a record instead of accidentally returning the whole
     * model map.
     */
    get(key, id) {
        if (id == null)
            return this.data.get(key);
        return this.data.get(key)?.get(id);
    }
    /**
     * Async authoritative read. Always queries the SQL database for memory: false models.
     * For memory: true models, returns from store (already loaded on boot).
     */
    async find(modelName, id) {
        // For views in non-SQL mode, use view resolver
        if (Orm.instance?.isView?.(modelName) && !this._sqlDb) {
            const resolver = new ViewResolver(modelName);
            return resolver.resolveOne(id);
        }
        // For memory: true models, the store is authoritative
        if (this._isMemoryModel(modelName)) {
            return this.get(modelName, id);
        }
        // For memory: false models, always query the SQL database
        if (this._sqlDb) {
            return this._sqlDb.findRecord(modelName, id);
        }
        // Fallback to store (JSON mode or no SQL adapter)
        return this.get(modelName, id);
    }
    /**
     * Async read for all records of a model. Always queries the SQL adapter
     * for memory: false models. For memory: true models, returns from store.
     */
    async findAll(modelName, conditions) {
        // For views in non-SQL mode, use view resolver
        if (Orm.instance?.isView?.(modelName) && !this._sqlDb) {
            const resolver = new ViewResolver(modelName);
            const records = await resolver.resolveAll();
            if (!conditions || Object.keys(conditions).length === 0)
                return records;
            return records.filter((record) => Object.entries(conditions).every(([key, value]) => isStoreRecord(record) && record.__data[key] === value));
        }
        // For memory: true models without conditions, return from store
        if (this._isMemoryModel(modelName) && !conditions) {
            const modelStore = this.get(modelName);
            return modelStore ? Array.from(modelStore.values()) : [];
        }
        // For memory: false models (or filtered queries), always query the SQL database
        if (this._sqlDb) {
            return this._sqlDb.findAll(modelName, conditions);
        }
        // Fallback to store (JSON mode) — apply conditions in-memory if provided
        const modelStore = this.get(modelName);
        if (!modelStore)
            return [];
        const records = Array.from(modelStore.values());
        if (!conditions || Object.keys(conditions).length === 0)
            return records;
        return records.filter((record) => Object.entries(conditions).every(([key, value]) => isStoreRecord(record) && record.__data[key] === value));
    }
    /**
     * Async query — always hits the SQL adapter, never reads from memory cache.
     * Use for complex queries, aggregations, or when you need guaranteed freshness.
     */
    async query(modelName, conditions = {}) {
        if (this._sqlDb) {
            return this._sqlDb.findAll(modelName, conditions);
        }
        // Fallback: filter in-memory store
        const modelStore = this.get(modelName);
        if (!modelStore)
            return [];
        const records = Array.from(modelStore.values());
        if (Object.keys(conditions).length === 0)
            return records;
        return records.filter((record) => Object.entries(conditions).every(([key, value]) => isStoreRecord(record) && record.__data[key] === value));
    }
    /**
     * Check if a model is configured for in-memory storage.
     * @private
     */
    _isMemoryModel(modelName) {
        if (this._memoryResolver)
            return this._memoryResolver(modelName);
        return false; // default to non-memory if resolver not set yet
    }
    /** Replace the whole record Map for a model. */
    set(key, value) {
        this.data.set(key, value);
    }
    /**
     * Remove one record (id given) or all records of a model from the store.
     * @throws when the model is a read-only view
     */
    remove(key, id) {
        // Guard: read-only views cannot have records removed
        if (Orm.instance?.isView?.(key)) {
            throw new Error(`Cannot remove records from read-only view '${key}'`);
        }
        // Nullish check (not truthiness) so id 0 removes that single record
        // instead of wiping every record of the model
        if (id != null)
            return this.unloadRecord(key, id);
        this.unloadAllRecords(key);
    }
    /**
     * Unload a single record, detaching it from all relationship registries.
     * With options.includeChildren, child records (hasMany and exclusive
     * belongsTo) are unloaded too, children before parents.
     */
    unloadRecord(model, id, options = {}) {
        const modelStore = this.data.get(model);
        if (!modelStore) {
            console.warn(`[Store] Cannot unload record: model "${model}" not found in store — ensure the model is registered before unloading`);
            return;
        }
        if (typeof id !== 'string' && typeof id !== 'number')
            return;
        const raw = modelStore.get(id);
        if (!raw || !isStoreRecord(raw)) {
            console.warn(`[Store] Cannot unload record: ${model}:${id} not found in store — it may have already been unloaded`);
            return;
        }
        const record = raw;
        const { toUnload, visited } = options.includeChildren
            ? this._buildUnloadQueue(record, options)
            : { toUnload: [{ record, modelName: model, recordId: id }], visited: new Set([`${model}:${id}`]) };
        // Reverse so children (queued after their parents) are unloaded first
        for (const item of toUnload.reverse()) {
            const { record: recordToUnload, modelName, recordId } = item;
            this._removeFromHasManyArrays(modelName, recordId, visited);
            this._nullifyBelongsToReferences(modelName, recordId, visited);
            this._cleanupRelationshipRegistries(modelName, recordId);
            recordToUnload.clean();
            this.data.get(modelName)?.delete(recordId);
        }
    }
    /**
     * Unload every record of a model and drop the model from all
     * relationship registries.
     */
    unloadAllRecords(model, options = {}) {
        const modelStore = this.data.get(model);
        if (!modelStore) {
            console.warn(`[Store] Cannot unload all records: model "${model}" not found in store — ensure the model is registered before unloading`);
            return;
        }
        const recordIds = Array.from(modelStore.keys());
        for (const id of recordIds) {
            // Re-check: an earlier includeChildren unload may already have removed it
            if (modelStore.has(id)) {
                this.unloadRecord(model, id, options);
            }
        }
        for (const relationshipType of TYPES) {
            const reg = relationships.get(relationshipType);
            if (reg instanceof Map)
                reg.delete(model);
        }
    }
    /**
     * Remove the record from every hasMany array held by other records.
     * Arrays owned by records that are themselves being unloaded (visited)
     * are left untouched.
     * @private
     */
    _removeFromHasManyArrays(modelName, recordId, visited) {
        const hasManyRegistry = getHasManyRegistry();
        for (const [sourceModel, targetModels] of hasManyRegistry) {
            const targetModelMap = targetModels.get(modelName);
            if (!targetModelMap)
                continue;
            for (const [sourceRecordId, hasManyArray] of targetModelMap) {
                const sourceKey = `${sourceModel}:${sourceRecordId}`;
                // Don't modify arrays of records being deleted
                if (visited.has(sourceKey))
                    continue;
                const index = hasManyArray.findIndex(r => r && isStoreRecord(r) && r.id === recordId);
                if (index !== -1)
                    hasManyArray.splice(index, 1);
            }
        }
    }
    /**
     * Null out belongsTo references (registry entries and cached
     * __relationships) that point at the record being unloaded.
     * @private
     */
    _nullifyBelongsToReferences(modelName, recordId, visited) {
        const belongsToRegistry = getBelongsToRegistry();
        for (const [sourceModel, targetModels] of belongsToRegistry) {
            const targetModelMap = targetModels.get(modelName);
            if (!targetModelMap)
                continue;
            for (const [sourceRecordId, belongsToRecord] of targetModelMap) {
                if (belongsToRecord && isStoreRecord(belongsToRecord) && belongsToRecord.id === recordId) {
                    const sourceKey = `${sourceModel}:${sourceRecordId}`;
                    if (visited.has(sourceKey))
                        continue;
                    targetModelMap.set(sourceRecordId, null);
                    if (typeof sourceRecordId !== 'string' && typeof sourceRecordId !== 'number')
                        continue;
                    const sourceRaw = this.get(sourceModel, sourceRecordId);
                    if (!sourceRaw || !isStoreRecord(sourceRaw))
                        continue;
                    if (sourceRaw.__relationships) {
                        for (const [key, value] of Object.entries(sourceRaw.__relationships)) {
                            if (value && isStoreRecord(value) && value.id === recordId) {
                                sourceRaw.__relationships[key] = null;
                            }
                        }
                    }
                }
            }
        }
    }
    /**
     * Drop the record's own entries from the hasMany/belongsTo/pending
     * relationship registries.
     * @private
     */
    _cleanupRelationshipRegistries(modelName, recordId) {
        const hasManyMap = getHasManyRegistry().get(modelName);
        if (hasManyMap) {
            for (const [, recordMap] of hasManyMap)
                recordMap.delete(recordId);
        }
        const belongsToMap = getBelongsToRegistry().get(modelName);
        if (belongsToMap) {
            for (const [, recordMap] of belongsToMap)
                recordMap.delete(recordId);
        }
        const pendingMap = getPendingRegistry().get(modelName);
        if (pendingMap)
            pendingMap.delete(recordId);
    }
    /**
     * Extracts hasMany and non-bidirectional belongsTo children from a record
     * @private
     */
    _getChildren(record) {
        const children = [];
        if (!record.__relationships)
            return children;
        for (const [key, value] of Object.entries(record.__relationships)) {
            // hasMany children - always include
            if (Array.isArray(value)) {
                for (const childRecord of value) {
                    if (childRecord && isStoreRecord(childRecord))
                        children.push({ childRecord, relationshipKey: key, type: 'hasMany' });
                }
            }
            else if (value && isStoreRecord(value) && value.__model && !this._isBidirectionalRelationship(record.__model.__name, value.__model.__name)) {
                children.push({ childRecord: value, relationshipKey: key, type: 'belongsTo' });
            }
        }
        return children;
    }
    /**
     * True when the target model holds hasMany entries back to the source
     * model (i.e. the belongsTo relationship has an inverse).
     * @private
     */
    _isBidirectionalRelationship(sourceModel, targetModel) {
        const inverseMap = getHasManyRegistry().get(targetModel)?.get(sourceModel);
        return !!inverseMap && inverseMap.size > 0;
    }
    /**
     * Breadth-first walk producing the ordered list of records to unload
     * plus the visited-key set used to protect in-flight records.
     * @private
     */
    _buildUnloadQueue(record, options) {
        const visited = new Set();
        const toUnload = [];
        const queue = [{
                record,
                modelName: record.__model.__name,
                recordId: record.id,
                isRoot: true,
                depth: 0
            }];
        while (queue.length > 0) {
            const item = queue.shift();
            if (!item)
                break;
            const key = `${item.modelName}:${item.recordId}`;
            if (visited.has(key))
                continue;
            visited.add(key);
            toUnload.push(item);
            // Add children to queue if includeChildren is enabled
            if (options.includeChildren) {
                const children = this._getChildren(item.record);
                for (const { childRecord } of children) {
                    if (childRecord) {
                        queue.push({
                            record: childRecord,
                            modelName: childRecord.__model.__name,
                            recordId: childRecord.id,
                            isRoot: false,
                            depth: (item.depth ?? 0) + 1
                        });
                    }
                }
            }
        }
        return { toUnload, visited };
    }
}
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
export { validateIdentifier, buildInsert, buildUpdate, buildDelete, buildSelect } from '../postgres/query-builder.js';
/**
 * Validate a SQL interval literal (e.g. "7 days") before interpolation.
 * Returns the trimmed interval; throws on anything outside the whitelist.
 */
export declare function validateInterval(interval: string, context?: string): string;
/**
 * Validate a simple aggregate expression (e.g. "AVG(value) AS avg_value").
 * Returns the trimmed expression; throws when it is not a plain function call.
 */
export declare function validateAggregate(expr: string, context?: string): string;
/** Parameterized query: SQL text plus positional bind values. */
interface QueryResult {
    sql: string;
    values: unknown[];
}
/** Statement with no bind parameters (DDL-style). */
interface SqlResult {
    sql: string;
}
interface HypertableOptions {
    /** Chunk time interval, e.g. "7 days". */
    chunkInterval?: string;
}
interface TimeBucketOptions {
    /** Aggregate expressions added to the SELECT list. */
    aggregates?: string[];
    /** Equality filters: column -> value. */
    where?: Record<string, unknown>;
    /** "column [ASC|DESC]"; defaults to the bucket column. */
    orderBy?: string;
    limit?: number;
}
interface ContinuousAggregateOptions {
    /** When true, create the view WITH NO DATA (no initial refresh). */
    withNoData?: boolean;
}
/**
 * Build a CREATE TABLE + hypertable conversion statement.
 * TimescaleDB hypertables are regular tables converted via create_hypertable().
 */
export declare function buildCreateHypertable(table: string, timeColumn: string, options?: HypertableOptions): QueryResult;
/**
 * Build a time_bucket aggregation query.
 */
export declare function buildTimeBucket(table: string, timeColumn: string, bucketSize: string, options?: TimeBucketOptions): QueryResult;
/**
 * Build a continuous aggregate creation statement.
 */
export declare function buildContinuousAggregate(viewName: string, table: string, timeColumn: string, bucketSize: string, aggregates: string[], options?: ContinuousAggregateOptions): SqlResult;
/**
 * Build an ADD compression policy statement.
 */
export declare function buildCompressionPolicy(table: string, compressAfter: string): SqlResult;
/**
 * Build an ALTER TABLE to enable compression on a hypertable.
 */
export declare function buildEnableCompression(table: string, segmentBy?: string, orderBy?: string): SqlResult;
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
// Re-export all base PostgreSQL query builders
|
|
2
|
+
export { validateIdentifier, buildInsert, buildUpdate, buildDelete, buildSelect } from '../postgres/query-builder.js';
|
|
3
|
+
import { validateIdentifier } from '../postgres/query-builder.js';
|
|
4
|
+
// Whitelist for PostgreSQL interval literals: "<count> <unit>[s]".
const SAFE_INTERVAL = /^\d+\s+(microsecond|millisecond|second|minute|hour|day|week|month|year)s?$/i;
/**
 * Validate a user-supplied interval string before it is interpolated into
 * SQL. Returns the trimmed interval; throws on anything outside the
 * whitelist above.
 */
export function validateInterval(interval, context = 'interval') {
    const acceptable = typeof interval === 'string'
        && interval.length > 0
        && SAFE_INTERVAL.test(interval.trim());
    if (!acceptable) {
        throw new Error(`Invalid SQL ${context}: "${interval}". Intervals must match pattern like "7 days", "1 hour", "30 minutes".`);
    }
    return interval.trim();
}
|
|
11
|
+
// Whitelist for aggregate expressions: FN(col|*) [AS alias], identifiers
// optionally double-quoted.
const SAFE_AGGREGATE = /^(COUNT|SUM|AVG|MIN|MAX|FIRST|LAST)\s*\(\s*("?[a-zA-Z_][a-zA-Z0-9_]*"?|\*)\s*\)\s*(AS\s+"?[a-zA-Z_][a-zA-Z0-9_]*"?)?$/i;
/**
 * Validate a user-supplied aggregate expression before it is interpolated
 * into SQL. Returns the trimmed expression; throws when it is anything
 * other than a simple function call such as "AVG(value) AS avg_value".
 */
export function validateAggregate(expr, context = 'aggregate') {
    const acceptable = typeof expr === 'string'
        && expr.length > 0
        && SAFE_AGGREGATE.test(expr.trim());
    if (!acceptable) {
        throw new Error(`Invalid SQL ${context}: "${expr}". Aggregates must be simple function calls like "AVG(value) AS avg_value".`);
    }
    return expr.trim();
}
|
|
18
|
+
/**
 * Build a CREATE TABLE + hypertable conversion statement.
 * TimescaleDB hypertables are regular tables converted via create_hypertable().
 */
export function buildCreateHypertable(table, timeColumn, options = {}) {
    validateIdentifier(table, 'table name');
    validateIdentifier(timeColumn, 'column name');
    const { chunkInterval = '7 days' } = options;
    validateInterval(chunkInterval, 'chunk interval');
    const args = [
        `'"${table}"'`,
        `'${timeColumn}'`,
        `chunk_time_interval => INTERVAL '${chunkInterval}'`,
        `if_not_exists => TRUE`,
    ];
    return { sql: `SELECT create_hypertable(${args.join(', ')})`, values: [] };
}
|
|
30
|
+
/**
 * Build a time_bucket aggregation query.
 * Bucket size, filter values, and limit are bound as positional parameters;
 * identifiers and aggregate expressions are validated before interpolation.
 */
export function buildTimeBucket(table, timeColumn, bucketSize, options = {}) {
    validateIdentifier(table, 'table name');
    validateIdentifier(timeColumn, 'column name');
    const { aggregates = [], where, orderBy = 'bucket', limit } = options;
    const values = [];
    let nextParam = 1;
    // Register a bind value and return its $n placeholder
    const placeholder = (value) => {
        values.push(value);
        return `$${nextParam++}`;
    };
    const selectCols = [`time_bucket(${placeholder(bucketSize)}, "${timeColumn}") AS bucket`];
    for (const agg of aggregates)
        selectCols.push(validateAggregate(agg));
    let whereStr = '';
    if (where) {
        const clauses = Object.entries(where).map(([column, value]) => {
            validateIdentifier(column, 'column name');
            return `"${column}" = ${placeholder(value)}`;
        });
        if (clauses.length > 0)
            whereStr = ` WHERE ${clauses.join(' AND ')}`;
    }
    let orderStr = '';
    if (orderBy) {
        const [col, rawDir] = orderBy.trim().split(/\s+/);
        const dir = rawDir?.toUpperCase();
        validateIdentifier(col, 'ORDER BY column');
        if (dir && dir !== 'ASC' && dir !== 'DESC') {
            throw new Error(`Invalid ORDER BY direction: "${dir}". Must be ASC or DESC.`);
        }
        orderStr = ` ORDER BY "${col}"${dir ? ` ${dir}` : ''}`;
    }
    let limitStr = '';
    if (limit != null)
        limitStr = ` LIMIT ${placeholder(limit)}`;
    const sql = `SELECT ${selectCols.join(', ')} FROM "${table}"${whereStr} GROUP BY bucket${orderStr}${limitStr}`;
    return { sql, values };
}
|
|
72
|
+
/**
 * Build a continuous aggregate creation statement.
 * All identifiers, the bucket size, and each aggregate expression are
 * validated before being interpolated into the DDL.
 */
export function buildContinuousAggregate(viewName, table, timeColumn, bucketSize, aggregates, options = {}) {
    validateIdentifier(viewName, 'view name');
    validateIdentifier(table, 'table name');
    validateIdentifier(timeColumn, 'column name');
    const { withNoData = false } = options;
    validateInterval(bucketSize, 'bucket size');
    for (const agg of aggregates)
        validateAggregate(agg);
    const selectList = [
        `time_bucket('${bucketSize}', "${timeColumn}") AS bucket`,
        ...aggregates,
    ].join(', ');
    const suffix = withNoData ? ' WITH NO DATA' : '';
    return {
        sql: `CREATE MATERIALIZED VIEW "${viewName}" WITH (timescaledb.continuous) AS SELECT ${selectList} FROM "${table}" GROUP BY bucket${suffix}`,
    };
}
|
|
90
|
+
/**
 * Build an ADD compression policy statement.
 * The interval is validated against the interval whitelist before use.
 */
export function buildCompressionPolicy(table, compressAfter) {
    validateIdentifier(table, 'table name');
    validateInterval(compressAfter, 'compress after interval');
    return {
        sql: `SELECT add_compression_policy('"${table}"', INTERVAL '${compressAfter}', if_not_exists => TRUE)`,
    };
}
|
|
99
|
+
/**
 * Build an ALTER TABLE to enable compression on a hypertable.
 * Optional segment-by / order-by columns are validated before use.
 */
export function buildEnableCompression(table, segmentBy, orderBy) {
    validateIdentifier(table, 'table name');
    const settings = ['timescaledb.compress'];
    if (segmentBy) {
        validateIdentifier(segmentBy, 'column name');
        settings.push(`timescaledb.compress_segmentby = '"${segmentBy}"'`);
    }
    if (orderBy) {
        validateIdentifier(orderBy, 'column name');
        settings.push(`timescaledb.compress_orderby = '"${orderBy}"'`);
    }
    return { sql: `ALTER TABLE "${table}" SET (${settings.join(', ')})` };
}
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
import PostgresDB from '../postgres/postgres-db.js';
interface HypertableOptions {
    /** Chunk time interval, e.g. "7 days". */
    chunkInterval?: string;
}
interface TimeBucketOptions {
    /** Aggregate expressions added to the SELECT list. */
    aggregates?: string[];
    /** Equality filters: column -> value. */
    where?: Record<string, unknown>;
    /** "column [ASC|DESC]". */
    orderBy?: string;
    limit?: number;
}
interface ContinuousAggregateOptions {
    /** When true, create the view WITH NO DATA (no initial refresh). */
    withNoData?: boolean;
}
interface CompressionOptions {
    /** Column to segment compressed chunks by. */
    segmentBy?: string;
    /** Column to order rows by within compressed chunks. */
    orderBy?: string;
}
/**
 * PostgresDB adapter specialized for TimescaleDB: hypertable creation,
 * time-bucket queries, continuous aggregates, and compression management.
 */
export default class TimescaleDB extends PostgresDB {
    /** Extensions this adapter declares ('timescaledb') — presumably consumed by PostgresDB setup; verify there. */
    static extensions: string[];
    /** Configuration section key for this adapter ('timescale'). */
    static configKey: string;
    constructor(deps?: Record<string, unknown>);
    /** Typed view over the injected dependency bag. */
    private get tsDeps();
    /**
     * Convert a table to a TimescaleDB hypertable.
     * Should be called after the table is created (e.g. after initial migration).
     */
    createHypertable(modelName: string, timeColumn: string, options?: HypertableOptions): Promise<void>;
    /**
     * Query time-bucketed aggregations on a hypertable.
     * Resolves to [] when the model is unknown.
     */
    timeBucket(modelName: string, timeColumn: string, bucketSize: string, options?: TimeBucketOptions): Promise<Record<string, unknown>[]>;
    /**
     * Create a continuous aggregate view on a hypertable.
     */
    createContinuousAggregate(viewName: string, modelName: string, timeColumn: string, bucketSize: string, aggregates: string[], options?: ContinuousAggregateOptions): Promise<void>;
    /**
     * Enable compression on a hypertable.
     */
    enableCompression(modelName: string, options?: CompressionOptions): Promise<void>;
    /**
     * Add a compression policy to a hypertable.
     */
    addCompressionPolicy(modelName: string, compressAfter: string): Promise<void>;
}
export {};
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
import PostgresDB from '../postgres/postgres-db.js';
|
|
2
|
+
import { isDbError } from '../utils.js';
|
|
3
|
+
import { buildCreateHypertable, buildTimeBucket, buildContinuousAggregate, buildCompressionPolicy, buildEnableCompression } from './query-builder.js';
|
|
4
|
+
/**
 * PostgresDB adapter specialized for TimescaleDB. Injects the timescale
 * query builders into the dependency bag and exposes hypertable helpers.
 */
export default class TimescaleDB extends PostgresDB {
    static extensions = ['timescaledb'];
    static configKey = 'timescale';
    constructor(deps = {}) {
        super({
            ...deps,
            buildCreateHypertable,
            buildTimeBucket,
            buildContinuousAggregate,
            buildCompressionPolicy,
            buildEnableCompression,
        });
    }
    /** Alias for the dependency bag (given a narrower type in the .d.ts). */
    get tsDeps() {
        return this.deps;
    }
    /**
     * Resolve the introspected schema for a model or throw.
     * Factors out the lookup previously repeated in every hypertable method.
     * @private
     */
    _requireSchema(modelName) {
        const schema = this.deps.introspectModels()[modelName];
        if (!schema)
            throw new Error(`Model '${modelName}' not found`);
        return schema;
    }
    /**
     * Convert a table to a TimescaleDB hypertable.
     * Should be called after the table is created (e.g. after initial migration).
     */
    async createHypertable(modelName, timeColumn, options = {}) {
        const schema = this._requireSchema(modelName);
        const { sql } = this.tsDeps.buildCreateHypertable(schema.table, timeColumn, options);
        await this.requirePool().query(sql);
    }
    /**
     * Query time-bucketed aggregations on a hypertable.
     * Returns [] when the model is unknown or its table does not exist yet.
     */
    async timeBucket(modelName, timeColumn, bucketSize, options = {}) {
        // Lenient lookup by design: unknown model yields an empty result, no throw
        const schema = this.deps.introspectModels()[modelName];
        if (!schema)
            return [];
        const { sql, values } = this.tsDeps.buildTimeBucket(schema.table, timeColumn, bucketSize, options);
        try {
            const result = await this.requirePool().query(sql, values);
            return result.rows;
        }
        catch (error) {
            // 42P01 (undefined_table): table not migrated yet — treat as empty
            if (isDbError(error) && error.code === '42P01')
                return [];
            throw error;
        }
    }
    /**
     * Create a continuous aggregate view on a hypertable.
     */
    async createContinuousAggregate(viewName, modelName, timeColumn, bucketSize, aggregates, options = {}) {
        const schema = this._requireSchema(modelName);
        const { sql } = this.tsDeps.buildContinuousAggregate(viewName, schema.table, timeColumn, bucketSize, aggregates, options);
        await this.requirePool().query(sql);
    }
    /**
     * Enable compression on a hypertable.
     */
    async enableCompression(modelName, options = {}) {
        const schema = this._requireSchema(modelName);
        const { sql } = this.tsDeps.buildEnableCompression(schema.table, options.segmentBy, options.orderBy);
        await this.requirePool().query(sql);
    }
    /**
     * Add a compression policy to a hypertable.
     */
    async addCompressionPolicy(modelName, compressAfter) {
        const schema = this._requireSchema(modelName);
        const { sql } = this.tsDeps.buildCompressionPolicy(schema.table, compressAfter);
        await this.requirePool().query(sql);
    }
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import { getTimestamp } from "@stonyx/utils/date";
|
|
2
|
+
const transforms = {
|
|
3
|
+
boolean: (value) => typeof value === 'string' ? value.trim().toLowerCase() === 'true' : !!value,
|
|
4
|
+
date: (value) => value ? new Date(value) : null,
|
|
5
|
+
float: (value) => parseFloat(value),
|
|
6
|
+
number: (value) => parseInt(value),
|
|
7
|
+
passthrough: (value) => value,
|
|
8
|
+
string: (value) => String(value),
|
|
9
|
+
timestamp: (value) => getTimestamp(value),
|
|
10
|
+
trim: (value) => value?.trim(),
|
|
11
|
+
uppercase: (value) => value?.toUpperCase(),
|
|
12
|
+
};
|
|
13
|
+
// Math Proxies
|
|
14
|
+
['ceil', 'floor', 'round'].forEach(method => {
|
|
15
|
+
transforms[method] = (value) => Math[method](value);
|
|
16
|
+
});
|
|
17
|
+
export default transforms;
|