@stonyx/orm 0.2.1-beta.82 → 0.2.1-beta.83
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config/environment.js +17 -0
- package/package.json +1 -1
- package/src/main.js +6 -1
- package/src/postgres/connection.js +10 -3
- package/src/postgres/migration-generator.js +2 -2
- package/src/postgres/postgres-db.js +11 -4
- package/src/timescale/query-builder.js +142 -0
- package/src/timescale/timescale-db.js +111 -0
package/config/environment.js
CHANGED
|
@@ -26,6 +26,13 @@ const {
|
|
|
26
26
|
PG_DATABASE,
|
|
27
27
|
PG_CONNECTION_LIMIT,
|
|
28
28
|
PG_MIGRATIONS_DIR,
|
|
29
|
+
TIMESCALE_HOST,
|
|
30
|
+
TIMESCALE_PORT,
|
|
31
|
+
TIMESCALE_USER,
|
|
32
|
+
TIMESCALE_PASSWORD,
|
|
33
|
+
TIMESCALE_DATABASE,
|
|
34
|
+
TIMESCALE_CONNECTION_LIMIT,
|
|
35
|
+
TIMESCALE_MIGRATIONS_DIR,
|
|
29
36
|
} = process.env;
|
|
30
37
|
|
|
31
38
|
export default {
|
|
@@ -67,6 +74,16 @@ export default {
|
|
|
67
74
|
migrationsDir: PG_MIGRATIONS_DIR ?? 'migrations',
|
|
68
75
|
migrationsTable: '__migrations',
|
|
69
76
|
} : undefined,
|
|
77
|
+
timescale: TIMESCALE_HOST ? {
|
|
78
|
+
host: TIMESCALE_HOST ?? 'localhost',
|
|
79
|
+
port: parseInt(TIMESCALE_PORT ?? '5432'),
|
|
80
|
+
user: TIMESCALE_USER ?? 'postgres',
|
|
81
|
+
password: TIMESCALE_PASSWORD ?? '',
|
|
82
|
+
database: TIMESCALE_DATABASE ?? 'stonyx',
|
|
83
|
+
connectionLimit: parseInt(TIMESCALE_CONNECTION_LIMIT ?? '10'),
|
|
84
|
+
migrationsDir: TIMESCALE_MIGRATIONS_DIR ?? 'migrations',
|
|
85
|
+
migrationsTable: '__migrations',
|
|
86
|
+
} : undefined,
|
|
70
87
|
restServer: {
|
|
71
88
|
enabled: ORM_USE_REST_SERVER ?? 'true', // Whether to load restServer for automatic route setup or
|
|
72
89
|
route: ORM_REST_ROUTE ?? '/',
|
package/package.json
CHANGED
package/src/main.js
CHANGED
|
@@ -109,7 +109,12 @@ export default class Orm {
|
|
|
109
109
|
|
|
110
110
|
setup(eventNames);
|
|
111
111
|
|
|
112
|
-
if (config.orm.
|
|
112
|
+
if (config.orm.timescale) {
|
|
113
|
+
const { default: TimescaleDB } = await import('./timescale/timescale-db.js');
|
|
114
|
+
this.sqlDb = new TimescaleDB();
|
|
115
|
+
this.db = this.sqlDb;
|
|
116
|
+
promises.push(this.sqlDb.init());
|
|
117
|
+
} else if (config.orm.postgres) {
|
|
113
118
|
const { default: PostgresDB } = await import('./postgres/postgres-db.js');
|
|
114
119
|
this.sqlDb = new PostgresDB();
|
|
115
120
|
this.db = this.sqlDb;
|
|
@@ -1,6 +1,11 @@
|
|
|
1
1
|
let pool = null;
|
|
2
2
|
|
|
3
|
-
|
|
3
|
+
/**
|
|
4
|
+
* Create or return the singleton pg Pool.
|
|
5
|
+
* @param {Object} pgConfig - Connection config (host, port, user, password, database, connectionLimit)
|
|
6
|
+
* @param {string[]} [extensions=['vector']] - PostgreSQL extensions to enable on init
|
|
7
|
+
*/
|
|
8
|
+
export async function getPool(pgConfig, extensions = ['vector']) {
|
|
4
9
|
if (pool) return pool;
|
|
5
10
|
|
|
6
11
|
const { default: pg } = await import('pg');
|
|
@@ -16,8 +21,10 @@ export async function getPool(pgConfig) {
|
|
|
16
21
|
connectionTimeoutMillis: 10000,
|
|
17
22
|
});
|
|
18
23
|
|
|
19
|
-
// Enable
|
|
20
|
-
|
|
24
|
+
// Enable requested PostgreSQL extensions
|
|
25
|
+
for (const ext of extensions) {
|
|
26
|
+
await pool.query(`CREATE EXTENSION IF NOT EXISTS ${ext}`);
|
|
27
|
+
}
|
|
21
28
|
|
|
22
29
|
return pool;
|
|
23
30
|
}
|
|
@@ -4,8 +4,8 @@ import path from 'path';
|
|
|
4
4
|
import config from 'stonyx/config';
|
|
5
5
|
import log from 'stonyx/log';
|
|
6
6
|
|
|
7
|
-
export async function generateMigration(description = 'migration') {
|
|
8
|
-
const { migrationsDir } = config.orm
|
|
7
|
+
export async function generateMigration(description = 'migration', configKey = 'postgres') {
|
|
8
|
+
const { migrationsDir } = config.orm[configKey];
|
|
9
9
|
const rootPath = config.rootPath;
|
|
10
10
|
const migrationsPath = path.resolve(rootPath, migrationsDir);
|
|
11
11
|
|
|
@@ -21,17 +21,24 @@ const defaultDeps = {
|
|
|
21
21
|
};
|
|
22
22
|
|
|
23
23
|
export default class PostgresDB {
|
|
24
|
+
/** @type {string[]} PostgreSQL extensions to enable on pool init. Subclasses can override. */
|
|
25
|
+
static extensions = ['vector'];
|
|
26
|
+
|
|
27
|
+
/** @type {string} Config key under config.orm for this adapter. Subclasses can override. */
|
|
28
|
+
static configKey = 'postgres';
|
|
29
|
+
|
|
24
30
|
constructor(deps = {}) {
|
|
25
|
-
|
|
26
|
-
|
|
31
|
+
const Ctor = this.constructor;
|
|
32
|
+
if (Ctor.instance) return Ctor.instance;
|
|
33
|
+
Ctor.instance = this;
|
|
27
34
|
|
|
28
35
|
this.deps = { ...defaultDeps, ...deps };
|
|
29
36
|
this.pool = null;
|
|
30
|
-
this.pgConfig = this.deps.config.orm.
|
|
37
|
+
this.pgConfig = this.deps.config.orm[Ctor.configKey];
|
|
31
38
|
}
|
|
32
39
|
|
|
33
40
|
async init() {
|
|
34
|
-
this.pool = await this.deps.getPool(this.pgConfig);
|
|
41
|
+
this.pool = await this.deps.getPool(this.pgConfig, this.constructor.extensions);
|
|
35
42
|
await this.deps.ensureMigrationsTable(this.pool, this.pgConfig.migrationsTable);
|
|
36
43
|
await this.loadMemoryRecords();
|
|
37
44
|
}
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
// Re-export all base PostgreSQL query builders
|
|
2
|
+
export { validateIdentifier, buildInsert, buildUpdate, buildDelete, buildSelect } from '../postgres/query-builder.js';
|
|
3
|
+
|
|
4
|
+
import { validateIdentifier } from '../postgres/query-builder.js';
|
|
5
|
+
|
|
6
|
+
/**
 * Build a CREATE TABLE + hypertable conversion statement.
 * TimescaleDB hypertables are regular tables converted via create_hypertable().
 * @param {string} table - Table name
 * @param {string} timeColumn - The time-partitioning column (must be a timestamp type)
 * @param {Object} [options]
 * @param {string} [options.chunkInterval='7 days'] - Chunk time interval (e.g. '7 days', '1 day 6 hours')
 * @returns {{ sql: string, values: any[] }}
 * @throws {Error} If an identifier or the chunk interval is not safe to inline in SQL
 */
export function buildCreateHypertable(table, timeColumn, options = {}) {
  validateIdentifier(table, 'table name');
  validateIdentifier(timeColumn, 'column name');

  const { chunkInterval = '7 days' } = options;
  // chunkInterval is interpolated directly into the SQL string below, so
  // restrict it to "<number> <unit>" groups to block SQL injection.
  if (!/^\d+\s+[A-Za-z]+(\s+\d+\s+[A-Za-z]+)*$/.test(chunkInterval)) {
    throw new Error(`Invalid chunk interval: ${chunkInterval}`);
  }

  const sql = `SELECT create_hypertable('"${table}"', '${timeColumn}', chunk_time_interval => INTERVAL '${chunkInterval}', if_not_exists => TRUE)`;

  return { sql, values: [] };
}
|
|
25
|
+
|
|
26
|
+
/**
 * Build a time_bucket aggregation query.
 * @param {string} table - Table name
 * @param {string} timeColumn - Timestamp column to bucket
 * @param {string} bucketSize - Bucket interval (e.g. '1 hour', '5 minutes', '1 day'); passed as a bind parameter
 * @param {Object} [options]
 * @param {string[]} [options.aggregates] - Aggregate expressions (e.g. ['AVG("value") AS avg_value']).
 *   Interpolated verbatim — treat as trusted SQL, never pass user input here.
 * @param {Object} [options.where] - WHERE conditions; values are bound as parameters
 * @param {string} [options.orderBy='bucket'] - ORDER BY target: a single identifier, optionally followed by ASC/DESC
 * @param {number} [options.limit] - LIMIT (bound as a parameter)
 * @returns {{ sql: string, values: any[] }}
 * @throws {Error} If an identifier or the orderBy clause is not safe to inline in SQL
 */
export function buildTimeBucket(table, timeColumn, bucketSize, options = {}) {
  validateIdentifier(table, 'table name');
  validateIdentifier(timeColumn, 'column name');

  const { aggregates = [], where, orderBy = 'bucket', limit } = options;
  const values = [];
  let paramIndex = 1;

  // bucketSize goes through a bind parameter, so it needs no validation here.
  const selectCols = [`time_bucket($${paramIndex++}, "${timeColumn}") AS bucket`];
  values.push(bucketSize);

  for (const agg of aggregates) {
    selectCols.push(agg);
  }

  const whereClauses = [];
  if (where) {
    for (const [k, v] of Object.entries(where)) {
      validateIdentifier(k, 'column name');
      whereClauses.push(`"${k}" = $${paramIndex++}`);
      values.push(v);
    }
  }

  // orderBy is interpolated directly into the SQL string, so only allow a bare
  // identifier with an optional sort direction to block SQL injection.
  if (orderBy && !/^[A-Za-z_][A-Za-z0-9_]*(\s+(ASC|DESC))?$/i.test(orderBy)) {
    throw new Error(`Invalid orderBy: ${orderBy}`);
  }

  const whereStr = whereClauses.length > 0 ? ` WHERE ${whereClauses.join(' AND ')}` : '';
  const orderStr = orderBy ? ` ORDER BY ${orderBy}` : '';
  let limitStr = '';
  if (limit != null) {
    limitStr = ` LIMIT $${paramIndex++}`;
    values.push(limit);
  }

  const sql = `SELECT ${selectCols.join(', ')} FROM "${table}"${whereStr} GROUP BY bucket${orderStr}${limitStr}`;

  return { sql, values };
}
|
|
74
|
+
|
|
75
|
+
/**
 * Build a continuous aggregate creation statement.
 * @param {string} viewName - Name for the continuous aggregate view
 * @param {string} table - Source hypertable
 * @param {string} timeColumn - Timestamp column
 * @param {string} bucketSize - Bucket interval (e.g. '1 hour')
 * @param {string[]} aggregates - Aggregate expressions.
 *   Interpolated verbatim — treat as trusted SQL, never pass user input here.
 * @param {Object} [options]
 * @param {boolean} [options.withNoData=false] - Create without materializing data initially
 * @returns {{ sql: string }}
 * @throws {Error} If an identifier or the bucket interval is not safe to inline in SQL
 */
export function buildContinuousAggregate(viewName, table, timeColumn, bucketSize, aggregates, options = {}) {
  validateIdentifier(viewName, 'view name');
  validateIdentifier(table, 'table name');
  validateIdentifier(timeColumn, 'column name');

  // bucketSize is interpolated directly into the SQL string (continuous
  // aggregates cannot take bind parameters), so restrict it to
  // "<number> <unit>" groups to block SQL injection.
  if (!/^\d+\s+[A-Za-z]+(\s+\d+\s+[A-Za-z]+)*$/.test(bucketSize)) {
    throw new Error(`Invalid bucket interval: ${bucketSize}`);
  }

  const { withNoData = false } = options;

  const selectCols = [
    `time_bucket('${bucketSize}', "${timeColumn}") AS bucket`,
    ...aggregates,
  ];

  const withClause = withNoData ? ' WITH NO DATA' : '';

  const sql = `CREATE MATERIALIZED VIEW "${viewName}" WITH (timescaledb.continuous) AS SELECT ${selectCols.join(', ')} FROM "${table}" GROUP BY bucket${withClause}`;

  return { sql };
}
|
|
104
|
+
|
|
105
|
+
/**
 * Build an ADD compression policy statement.
 * @param {string} table - Hypertable name
 * @param {string} compressAfter - Interval after which to compress (e.g. '7 days')
 * @returns {{ sql: string }}
 * @throws {Error} If the table name or the interval is not safe to inline in SQL
 */
export function buildCompressionPolicy(table, compressAfter) {
  validateIdentifier(table, 'table name');

  // compressAfter is interpolated directly into the SQL string, so restrict it
  // to "<number> <unit>" groups to block SQL injection.
  if (!/^\d+\s+[A-Za-z]+(\s+\d+\s+[A-Za-z]+)*$/.test(compressAfter)) {
    throw new Error(`Invalid compression interval: ${compressAfter}`);
  }

  const sql = `SELECT add_compression_policy('"${table}"', INTERVAL '${compressAfter}', if_not_exists => TRUE)`;

  return { sql };
}
|
|
118
|
+
|
|
119
|
+
/**
 * Build an ALTER TABLE statement that turns on compression for a hypertable.
 * @param {string} table - Hypertable name
 * @param {string} segmentBy - Column to segment by (usually the non-time dimension)
 * @param {string} orderBy - Column to order compressed data by (usually the time column)
 * @returns {{ sql: string }}
 */
export function buildEnableCompression(table, segmentBy, orderBy) {
  validateIdentifier(table, 'table name');

  // Collect the storage parameters, then join them into one SET clause.
  const settings = ['timescaledb.compress'];

  if (segmentBy) {
    validateIdentifier(segmentBy, 'column name');
    settings.push(`timescaledb.compress_segmentby = '"${segmentBy}"'`);
  }

  if (orderBy) {
    validateIdentifier(orderBy, 'column name');
    settings.push(`timescaledb.compress_orderby = '"${orderBy}"'`);
  }

  return { sql: `ALTER TABLE "${table}" SET (${settings.join(', ')})` };
}
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
import PostgresDB from '../postgres/postgres-db.js';
|
|
2
|
+
import { buildCreateHypertable, buildTimeBucket, buildContinuousAggregate, buildCompressionPolicy, buildEnableCompression } from './query-builder.js';
|
|
3
|
+
|
|
4
|
+
export default class TimescaleDB extends PostgresDB {
  /** @type {string[]} PostgreSQL extensions to enable on pool init. */
  static extensions = ['timescaledb'];
  /** @type {string} Config key under config.orm for this adapter. */
  static configKey = 'timescale';

  /**
   * @param {Object} [deps] - Dependency overrides, merged over the Timescale
   *   query builders so tests can inject replacements.
   */
  constructor(deps = {}) {
    // Builders first, caller-supplied deps last: this matches PostgresDB's
    // `{ ...defaultDeps, ...deps }` convention, where explicit overrides win
    // (previously a caller-supplied builder was silently discarded).
    super({
      buildCreateHypertable,
      buildTimeBucket,
      buildContinuousAggregate,
      buildCompressionPolicy,
      buildEnableCompression,
      ...deps,
    });
  }

  /**
   * Look up the introspected schema for a model.
   * @param {string} modelName
   * @returns {Object} The model's schema (provides `.table`)
   * @throws {Error} If the model is not registered
   */
  #requireSchema(modelName) {
    const schema = this.deps.introspectModels()[modelName];
    if (!schema) throw new Error(`Model '${modelName}' not found`);
    return schema;
  }

  /**
   * Convert a table to a TimescaleDB hypertable.
   * Should be called after the table is created (e.g. after initial migration).
   * @param {string} modelName
   * @param {string} timeColumn - The time-partitioning column
   * @param {Object} [options]
   * @param {string} [options.chunkInterval='7 days']
   * @throws {Error} If the model is not registered
   */
  async createHypertable(modelName, timeColumn, options = {}) {
    const schema = this.#requireSchema(modelName);
    const { sql } = this.deps.buildCreateHypertable(schema.table, timeColumn, options);
    await this.pool.query(sql);
  }

  /**
   * Query time-bucketed aggregations on a hypertable.
   * Unknown models and not-yet-created tables yield an empty result rather
   * than throwing, keeping the read path lenient.
   * @param {string} modelName
   * @param {string} timeColumn - Timestamp column to bucket
   * @param {string} bucketSize - Bucket interval (e.g. '1 hour', '5 minutes')
   * @param {Object} [options]
   * @param {string[]} [options.aggregates] - Aggregate expressions
   * @param {Object} [options.where] - WHERE conditions
   * @param {number} [options.limit]
   * @returns {Promise<Object[]>} Rows with bucket + aggregate columns
   */
  async timeBucket(modelName, timeColumn, bucketSize, options = {}) {
    const schema = this.deps.introspectModels()[modelName];
    if (!schema) return [];

    const { sql, values } = this.deps.buildTimeBucket(schema.table, timeColumn, bucketSize, options);

    try {
      const result = await this.pool.query(sql, values);
      return result.rows;
    } catch (error) {
      // 42P01 = undefined_table: the hypertable hasn't been created yet.
      if (error.code === '42P01') return [];
      throw error;
    }
  }

  /**
   * Create a continuous aggregate view on a hypertable.
   * @param {string} viewName - Name for the materialized view
   * @param {string} modelName - Source hypertable model
   * @param {string} timeColumn - Timestamp column
   * @param {string} bucketSize - Bucket interval
   * @param {string[]} aggregates - Aggregate expressions
   * @param {Object} [options]
   * @param {boolean} [options.withNoData=false]
   * @throws {Error} If the model is not registered
   */
  async createContinuousAggregate(viewName, modelName, timeColumn, bucketSize, aggregates, options = {}) {
    const schema = this.#requireSchema(modelName);
    const { sql } = this.deps.buildContinuousAggregate(viewName, schema.table, timeColumn, bucketSize, aggregates, options);
    await this.pool.query(sql);
  }

  /**
   * Enable compression on a hypertable.
   * @param {string} modelName
   * @param {Object} [options]
   * @param {string} [options.segmentBy] - Column to segment by
   * @param {string} [options.orderBy] - Column to order by
   * @throws {Error} If the model is not registered
   */
  async enableCompression(modelName, options = {}) {
    const schema = this.#requireSchema(modelName);
    const { sql } = this.deps.buildEnableCompression(schema.table, options.segmentBy, options.orderBy);
    await this.pool.query(sql);
  }

  /**
   * Add a compression policy to a hypertable.
   * @param {string} modelName
   * @param {string} compressAfter - Interval after which to compress (e.g. '7 days')
   * @throws {Error} If the model is not registered
   */
  async addCompressionPolicy(modelName, compressAfter) {
    const schema = this.#requireSchema(modelName);
    const { sql } = this.deps.buildCompressionPolicy(schema.table, compressAfter);
    await this.pool.query(sql);
  }
}
|