@mikro-orm/sql 7.1.0-dev.15 → 7.1.0-dev.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,6 +15,7 @@ export declare class BasePostgreSqlPlatform extends AbstractSqlPlatform {
15
15
  usesEnumCheckConstraints(): boolean;
16
16
  getEnumArrayCheckConstraintExpression(column: string, items: string[]): string;
17
17
  supportsMaterializedViews(): boolean;
18
+ supportsPartitionedTables(): boolean;
18
19
  supportsCustomPrimaryKeyNames(): boolean;
19
20
  getCurrentTimestampSQL(length: number): string;
20
21
  getDateTimeTypeDeclarationSQL(column: {
@@ -30,6 +30,9 @@ export class BasePostgreSqlPlatform extends AbstractSqlPlatform {
30
30
  supportsMaterializedViews() {
31
31
  return true;
32
32
  }
33
/** Declarative table partitioning (`partition by range/list/hash`) is available on this platform. */
supportsPartitionedTables() {
    return true;
}
33
36
  supportsCustomPrimaryKeyNames() {
34
37
  return true;
35
38
  }
@@ -1,7 +1,7 @@
1
1
  import { type Dictionary, type Transaction } from '@mikro-orm/core';
2
2
  import { SchemaHelper } from '../../schema/SchemaHelper.js';
3
3
  import type { AbstractSqlConnection } from '../../AbstractSqlConnection.js';
4
- import type { CheckDef, Column, ForeignKey, IndexDef, Table, TableDifference, SqlTriggerDef } from '../../typings.js';
4
+ import type { CheckDef, Column, ForeignKey, IndexDef, Table, TableDifference, TablePartitioning, SqlTriggerDef } from '../../typings.js';
5
5
  import type { DatabaseSchema } from '../../schema/DatabaseSchema.js';
6
6
  import type { DatabaseTable } from '../../schema/DatabaseTable.js';
7
7
  export declare class PostgreSqlSchemaHelper extends SchemaHelper {
@@ -25,11 +25,22 @@ export declare class PostgreSqlSchemaHelper extends SchemaHelper {
25
25
  getListMaterializedViewsSQL(): string;
26
26
  loadMaterializedViews(schema: DatabaseSchema, connection: AbstractSqlConnection, schemaName?: string, ctx?: Transaction): Promise<void>;
27
27
  createMaterializedView(name: string, schema: string | undefined, definition: string, withData?: boolean): string;
28
+ createTable(table: DatabaseTable, alter?: boolean): string[];
28
29
  dropMaterializedViewIfExists(name: string, schema?: string): string;
29
30
  refreshMaterializedView(name: string, schema?: string, concurrently?: boolean): string;
30
31
  getNamespaces(connection: AbstractSqlConnection, ctx?: Transaction): Promise<string[]>;
31
32
  private getIgnoredNamespacesConditionSQL;
32
33
  loadInformationSchema(schema: DatabaseSchema, connection: AbstractSqlConnection, tables: Table[], schemas?: string[], ctx?: Transaction): Promise<void>;
34
+ /**
35
+ * Introspects direct partitions only: the `pg_inherits` join surfaces a parent's children but
36
+ * does not recurse into sub-partitioning (e.g. hash-of-range). Declarative `partitionBy`
37
+ * metadata does not express multi-level partitioning either, so grandchildren are intentionally
38
+ * invisible to schema diffing.
39
+ *
40
+ * Entries with an undefined schema bucket are resolved against `current_schema()` so they do
41
+ * not match same-named tables in unrelated schemas.
42
+ */
43
+ getPartitions(connection: AbstractSqlConnection, tablesBySchemas: Map<string | undefined, Table[]>, ctx?: Transaction): Promise<Dictionary<TablePartitioning>>;
33
44
  getAllIndexes(connection: AbstractSqlConnection, tables: Table[], ctx?: Transaction): Promise<Dictionary<IndexDef[]>>;
34
45
  /**
35
46
  * Parses column definitions from the full CREATE INDEX expression.
@@ -1,5 +1,6 @@
1
1
  import { DeferMode, EnumType, Type, Utils, } from '@mikro-orm/core';
2
2
  import { SchemaHelper } from '../../schema/SchemaHelper.js';
3
+ import { normalizePartitionBound, normalizePartitionDefinition } from '../../schema/partitioning.js';
3
4
  /** PostGIS system views that should be automatically ignored */
4
5
  const POSTGIS_VIEWS = ['geography_columns', 'geometry_columns'];
5
6
  export class PostgreSqlSchemaHelper extends SchemaHelper {
@@ -24,13 +25,22 @@ export class PostgreSqlSchemaHelper extends SchemaHelper {
24
25
  return `create database ${this.quote(name)}`;
25
26
  }
26
27
  getListTablesSQL() {
28
+ // The `pg_inherits` anti-join compares on (schema, table) pairs so cross-schema child
29
+ // partitions are excluded even when their schema is not on the session `search_path`
30
+ // (in which case `inhrelid::regclass::text` renders as `schema.name` rather than bare `name`,
31
+ // breaking a plain `table_name not in (...)` predicate).
27
32
  return (`select table_name, table_schema as schema_name, ` +
28
33
  `(select pg_catalog.obj_description(c.oid) from pg_catalog.pg_class c
29
34
  where c.oid = (select ('"' || table_schema || '"."' || table_name || '"')::regclass::oid) and c.relname = table_name) as table_comment ` +
30
- `from information_schema.tables ` +
35
+ `from information_schema.tables t ` +
31
36
  `where ${this.getIgnoredNamespacesConditionSQL('table_schema')} ` +
32
37
  `and table_name != 'geometry_columns' and table_name != 'spatial_ref_sys' and table_type != 'VIEW' ` +
33
- `and table_name not in (select inhrelid::regclass::text from pg_inherits) ` +
38
+ `and not exists (` +
39
+ `select 1 from pg_inherits i ` +
40
+ `join pg_class c on c.oid = i.inhrelid ` +
41
+ `join pg_namespace n on n.oid = c.relnamespace ` +
42
+ `where c.relname = t.table_name and n.nspname = t.table_schema` +
43
+ `) ` +
34
44
  `order by table_name`);
35
45
  }
36
46
  getIgnoredViewsCondition() {
@@ -81,6 +91,23 @@ export class PostgreSqlSchemaHelper extends SchemaHelper {
81
91
  const dataClause = withData ? ' with data' : ' with no data';
82
92
  return `create materialized view ${viewName} as ${definition}${dataClause}`;
83
93
  }
94
/**
 * Extends the base CREATE TABLE DDL with PostgreSQL declarative partitioning: splices the
 * `partition by ...` clause into the parent's CREATE TABLE statement and appends one
 * `create table ... partition of ...` statement per declared child partition.
 */
createTable(table, alter) {
    const partitioning = table.getPartitioning();
    if (!partitioning) {
        return super.createTable(table, alter);
    }
    // The first statement is the CREATE TABLE itself; `rest` keeps any additional statements
    // emitted by the base implementation, which must run before the child partitions exist.
    const [createTable, ...rest] = super.createTable(table, alter);
    const partitions = partitioning.partitions.map(partition => {
        // A child partition lives in the parent's schema unless it was explicitly qualified.
        const partitionName = this.quote(this.getTableName(partition.name, partition.schema ?? table.schema));
        return `create table ${partitionName} partition of ${table.getQuotedName()} ${partition.bound}`;
    });
    // SchemaHelper.append() always terminates the CREATE TABLE with `;`; we rely on that to splice
    // the `partition by ...` clause in before the terminator. Use slice instead of replace() so that
    // regex replacement tokens like `$$`, `$&`, or `$1` inside user-supplied expressions (e.g., a
    // callback that returns a dollar-quoted literal) are not interpreted as back-references.
    const spliced = `${createTable.slice(0, -1)} partition by ${partitioning.definition};`;
    return [spliced, ...rest, ...partitions];
}
84
111
  dropMaterializedViewIfExists(name, schema) {
85
112
  return `drop materialized view if exists ${this.quote(this.getTableName(name, schema))} cascade`;
86
113
  }
@@ -120,6 +147,7 @@ export class PostgreSqlSchemaHelper extends SchemaHelper {
120
147
  const indexes = await this.getAllIndexes(connection, tables, ctx);
121
148
  const checks = await this.getAllChecks(connection, tablesBySchema, ctx);
122
149
  const fks = await this.getAllForeignKeys(connection, tablesBySchema, ctx);
150
+ const partitionings = await this.getPartitions(connection, tablesBySchema, ctx);
123
151
  const triggers = await this.getAllTriggers(connection, tablesBySchema);
124
152
  for (const t of tables) {
125
153
  const key = this.getTableKey(t);
@@ -132,8 +160,61 @@ export class PostgreSqlSchemaHelper extends SchemaHelper {
132
160
  if (triggers[key]) {
133
161
  table.setTriggers(triggers[key]);
134
162
  }
163
+ table.setPartitioning(partitionings[key]);
135
164
  }
136
165
  }
166
/**
 * Introspects direct partitions only: the `pg_inherits` join surfaces a parent's children but
 * does not recurse into sub-partitioning (e.g. hash-of-range). Declarative `partitionBy`
 * metadata does not express multi-level partitioning either, so grandchildren are intentionally
 * invisible to schema diffing.
 *
 * Entries with an undefined schema bucket are resolved against `current_schema()` so they do
 * not match same-named tables in unrelated schemas.
 *
 * @returns map of table key → `TablePartitioning` (canonical definition + child partitions)
 */
async getPartitions(connection, tablesBySchemas, ctx) {
    // Collapse every (schema, table) pair into a single `values (...)` relation and join against
    // the catalog, instead of building an OR-tree of per-schema `in (...)` predicates. This keeps
    // the query size O(pairs) rather than O(schemas × predicate_overhead) and stays sargable when
    // many schemas are in play.
    const pairs = [...tablesBySchemas.entries()].flatMap(([schema, tables]) => tables.map(t => {
        const schemaLiteral = schema == null ? 'null::text' : `${this.platform.quoteValue(schema)}::text`;
        return `(${schemaLiteral}, ${this.platform.quoteValue(t.table_name)})`;
    }));
    if (pairs.length === 0) {
        return {};
    }
    const sql = `with targets(schema_name, table_name) as (values ${pairs.join(', ')})
        select parent_ns.nspname as schema_name,
               parent.relname as table_name,
               pg_get_partkeydef(parent.oid) as partition_definition,
               child_ns.nspname as partition_schema_name,
               child.relname as partition_name,
               pg_get_expr(child.relpartbound, child.oid) as partition_bound
        from targets
        join pg_class parent on parent.relname = targets.table_name
        join pg_namespace parent_ns on parent_ns.oid = parent.relnamespace
          and parent_ns.nspname = coalesce(targets.schema_name, current_schema())
        join pg_partitioned_table partitioned on partitioned.partrelid = parent.oid
        left join pg_inherits inherits on inherits.inhparent = parent.oid
        left join pg_class child on child.oid = inherits.inhrelid
        left join pg_namespace child_ns on child_ns.oid = child.relnamespace
        order by parent_ns.nspname, parent.relname, child_ns.nspname, child.relname`;
    const rows = await connection.execute(sql, [], 'all', ctx);
    const ret = {};
    for (const row of rows) {
        const key = this.getTableKey(row);
        // `??=` keeps the first row's definition. The `left join`s above mean a partitioned
        // parent with zero children still yields one row (with null child columns), so such a
        // table gets an entry with an empty `partitions` list rather than being dropped.
        ret[key] ??= { definition: normalizePartitionDefinition(row.partition_definition), partitions: [] };
        if (row.partition_name && row.partition_bound) {
            ret[key].partitions.push({
                name: row.partition_name,
                schema: row.partition_schema_name,
                bound: normalizePartitionBound(row.partition_bound),
            });
        }
    }
    return ret;
}
137
218
  async getAllIndexes(connection, tables, ctx) {
138
219
  const sql = this.getIndexesSQL(tables);
139
220
  const unquote = (str) => str.replace(/['"`]/g, '');
@@ -656,6 +737,12 @@ export class PostgreSqlSchemaHelper extends SchemaHelper {
656
737
  return col.join(' ');
657
738
  }
658
739
  getPreAlterTable(tableDiff, safe) {
740
+ if (tableDiff.changedPartitioning) {
741
+ const from = tableDiff.changedPartitioning.from?.definition;
742
+ const to = tableDiff.changedPartitioning.to?.definition;
743
+ const action = !from ? 'Adding' : !to ? 'Removing' : 'Changing';
744
+ throw new Error(`${action} partition definitions for existing PostgreSQL tables is not supported automatically (${tableDiff.name}: '${from ?? '<none>'}' -> '${to ?? '<none>'}'); create a manual migration instead`);
745
+ }
659
746
  const ret = [];
660
747
  const parts = tableDiff.name.split('.');
661
748
  const tableName = parts.pop();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mikro-orm/sql",
3
- "version": "7.1.0-dev.15",
3
+ "version": "7.1.0-dev.16",
4
4
  "description": "TypeScript ORM for Node.js based on Data Mapper, Unit of Work and Identity Map patterns. Supports MongoDB, MySQL, PostgreSQL and SQLite databases as well as usage with vanilla JavaScript.",
5
5
  "keywords": [
6
6
  "data-mapper",
@@ -53,7 +53,7 @@
53
53
  "@mikro-orm/core": "^7.0.11"
54
54
  },
55
55
  "peerDependencies": {
56
- "@mikro-orm/core": "7.1.0-dev.15"
56
+ "@mikro-orm/core": "7.1.0-dev.16"
57
57
  },
58
58
  "engines": {
59
59
  "node": ">= 22.17.0"
@@ -1,5 +1,6 @@
1
1
  import { ReferenceKind, isRaw, } from '@mikro-orm/core';
2
2
  import { DatabaseTable } from './DatabaseTable.js';
3
+ import { getTablePartitioning } from './partitioning.js';
3
4
  /**
4
5
  * @internal
5
6
  */
@@ -173,6 +174,9 @@ export class DatabaseSchema {
173
174
  }
174
175
  const table = schema.addTable(meta.collection, this.getSchemaName(meta, config, schemaName));
175
176
  table.comment = meta.comment;
177
+ if (meta.partitionBy) {
178
+ table.setPartitioning(getTablePartitioning(meta, this.getSchemaName(meta, config, schemaName), id => platform.quoteIdentifier(id)));
179
+ }
176
180
  // For TPT child entities, only use ownProps (properties defined in this entity only)
177
181
  // For all other entities (including TPT root), use all props
178
182
  const propsToProcess = meta.inheritanceType === 'tpt' && meta.tptParent && meta.ownProps ? meta.ownProps : meta.props;
@@ -1,6 +1,6 @@
1
1
  import { type Configuration, type DeferMode, type Dictionary, type EntityMetadata, type EntityProperty, type IndexCallback, type NamingStrategy } from '@mikro-orm/core';
2
2
  import type { SchemaHelper } from './SchemaHelper.js';
3
- import type { CheckDef, Column, ForeignKey, IndexDef, SqlTriggerDef } from '../typings.js';
3
+ import type { CheckDef, Column, ForeignKey, IndexDef, TablePartitioning, SqlTriggerDef } from '../typings.js';
4
4
  import type { AbstractSqlPlatform } from '../AbstractSqlPlatform.js';
5
5
  /**
6
6
  * @internal
@@ -15,6 +15,7 @@ export declare class DatabaseTable {
15
15
  items: string[];
16
16
  }>;
17
17
  comment?: string;
18
+ partitioning?: TablePartitioning;
18
19
  constructor(platform: AbstractSqlPlatform, name: string, schema?: string | undefined);
19
20
  getQuotedName(): string;
20
21
  getColumns(): Column[];
@@ -22,6 +23,9 @@ export declare class DatabaseTable {
22
23
  removeColumn(name: string): void;
23
24
  getIndexes(): IndexDef[];
24
25
  getChecks(): CheckDef[];
26
+ getPartitioning(): TablePartitioning | undefined;
27
+ /** @internal */
28
+ setPartitioning(partitioning?: TablePartitioning): void;
25
29
  getTriggers(): SqlTriggerDef[];
26
30
  /** @internal */
27
31
  setIndexes(indexes: IndexDef[]): void;
@@ -1,4 +1,5 @@
1
1
  import { DecimalType, EntitySchema, isRaw, ReferenceKind, t, Type, UnknownType, Utils, } from '@mikro-orm/core';
2
+ import { toEntityPartitionBy } from './partitioning.js';
2
3
  /**
3
4
  * @internal
4
5
  */
@@ -13,6 +14,7 @@ export class DatabaseTable {
13
14
  #platform;
14
15
  nativeEnums = {}; // for postgres
15
16
  comment;
17
+ partitioning;
16
18
  constructor(platform, name, schema) {
17
19
  this.name = name;
18
20
  this.schema = schema;
@@ -36,6 +38,13 @@ export class DatabaseTable {
36
38
  getChecks() {
37
39
  return this.#checks;
38
40
  }
41
/** Returns the partitioning metadata attached to this table, or `undefined` when the table is not partitioned. */
getPartitioning() {
    return this.partitioning;
}
/** @internal Sets (or clears, when called without an argument) the table's partitioning metadata. */
setPartitioning(partitioning) {
    this.partitioning = partitioning;
}
39
48
  getTriggers() {
40
49
  return this.#triggers;
41
50
  }
@@ -185,6 +194,7 @@ export class DatabaseTable {
185
194
  const { fksOnColumnProps, fksOnStandaloneProps, columnFks, fkIndexes, nullableForeignKeys, skippedColumnNames } = this.foreignKeysToProps(namingStrategy, scalarPropertiesForRelations);
186
195
  const name = namingStrategy.getEntityName(this.name, this.schema);
187
196
  const schema = new EntitySchema({ name, collection: this.name, schema: this.schema, comment: this.comment });
197
+ schema.meta.partitionBy = toEntityPartitionBy(this.partitioning, this.name, this.schema);
188
198
  const compositeFkIndexes = {};
189
199
  const compositeFkUniques = {};
190
200
  const potentiallyUnmappedIndexes = this.#indexes.filter(index => !index.primary && // Skip primary index. Whether it's in use by scalar column or FK, it's already mapped.
@@ -1,5 +1,6 @@
1
1
  import { ArrayType, BooleanType, DateTimeType, inspect, JsonType, parseJsonSafe, Utils, } from '@mikro-orm/core';
2
2
  import { DatabaseTable } from './DatabaseTable.js';
3
+ import { diffPartitioning } from './partitioning.js';
3
4
  /**
4
5
  * Compares two Schemas and return an instance of SchemaDifference.
5
6
  */
@@ -200,6 +201,17 @@ export class SchemaComparator {
200
201
  });
201
202
  changes++;
202
203
  }
204
+ if (diffPartitioning(fromTable.getPartitioning(), toTable.getPartitioning(), this.#platform.getDefaultSchemaName())) {
205
+ tableDifferences.changedPartitioning = {
206
+ from: fromTable.getPartitioning(),
207
+ to: toTable.getPartitioning(),
208
+ };
209
+ this.log(`table partitioning changed for ${tableDifferences.name}`, {
210
+ fromPartitioning: fromTable.getPartitioning(),
211
+ toPartitioning: toTable.getPartitioning(),
212
+ });
213
+ changes++;
214
+ }
203
215
  const fromTableColumns = fromTable.getColumns();
204
216
  const toTableColumns = toTable.getColumns();
205
217
  // See if all the columns in "from" table exist in "to" table
@@ -0,0 +1,13 @@
1
+ import { splitCommaSeparatedIdentifiers, type EntityMetadata, type EntityPartitionBy } from '@mikro-orm/core';
2
+ import type { TablePartitioning } from '../typings.js';
3
+ export { splitCommaSeparatedIdentifiers };
4
+ /** @internal */
5
+ export declare function normalizePartitionDefinition(value: string): string;
6
+ /** @internal */
7
+ export declare function normalizePartitionBound(value: string): string;
8
+ /** @internal */
9
+ export declare const getTablePartitioning: (meta: EntityMetadata, tableSchema: string | undefined, quoteIdentifier?: (id: string) => string) => TablePartitioning | undefined;
10
+ /** @internal */
11
+ export declare const diffPartitioning: (from: TablePartitioning | undefined, to: TablePartitioning | undefined, defaultSchema: string | undefined) => boolean;
12
+ /** @internal */
13
+ export declare const toEntityPartitionBy: (partitioning: TablePartitioning | undefined, parentTableName?: string, parentSchema?: string) => EntityPartitionBy | undefined;
@@ -0,0 +1,326 @@
1
+ import { splitCommaSeparatedIdentifiers } from '@mikro-orm/core';
2
+ export { splitCommaSeparatedIdentifiers };
3
// Given `start` at an opening single quote, return the index of the quote that closes the
// literal. SQL escapes an embedded quote by doubling it (`''`), so quote pairs are skipped.
const skipQuotedLiteral = (value, start) => {
    for (let idx = start + 1; idx < value.length; idx++) {
        if (value[idx] !== "'") {
            continue;
        }
        if (value[idx + 1] === "'") {
            idx++; // doubled quote (`''`) — still inside the literal
            continue;
        }
        return idx;
    }
    // Unterminated literal — point past the end so callers' `slice(start, end + 1)` includes
    // the full remaining tail instead of dropping its last character.
    return value.length;
};
/**
 * Apply `transform` only to segments of `value` that lie outside single-quoted
 * SQL literals, leaving literal content (including escaped `''`) untouched.
 */
const mapOutsideLiterals = (value, transform) => {
    const pieces = [];
    let plain = '';
    let pos = 0;
    while (pos < value.length) {
        if (value[pos] !== "'") {
            plain += value[pos];
            pos++;
            continue;
        }
        pieces.push(transform(plain));
        plain = '';
        const close = skipQuotedLiteral(value, pos);
        pieces.push(value.slice(pos, close + 1));
        pos = close + 1;
    }
    pieces.push(transform(plain));
    return pieces.join('');
};
// Collapse any run of whitespace to a single space.
const collapseWhitespace = (segment) => segment.replace(/\s+/g, ' ');
// Collapse whitespace outside literals, then trim both ends.
const normalizeWhitespace = (value) => mapOutsideLiterals(value, collapseWhitespace).trim();
// Drop identifier double quotes outside literals (`"col"` -> `col`); quotes inside string literals survive.
const stripDoubleQuotes = (value) => mapOutsideLiterals(value, segment => segment.split('"').join(''));
const normalizeQuotedIdentifiers = (value) => stripDoubleQuotes(normalizeWhitespace(value));
45
/**
 * Return the index of the parenthesis that closes the first `(` seen at/after `start`
 * (callers pass `start` at an opening paren). Quoted literals are skipped so parentheses
 * inside strings do not count. Returns -1 when the input is unbalanced.
 */
const findMatchingParenthesis = (value, start) => {
    let depth = 0;
    for (let i = start; i < value.length; i++) {
        if (value[i] === "'") {
            i = skipQuotedLiteral(value, i);
            continue;
        }
        if (value[i] === '(') {
            depth++;
            continue;
        }
        if (value[i] === ')') {
            depth--;
            if (depth === 0) {
                return i;
            }
        }
    }
    return -1;
};
// Canonicalize literal spellings that PostgreSQL adds to partition bound expressions.
const normalizePartitionLiterals = (value) => value
    // PG pg_get_expr output often tacks `::text` onto string literals inside expressions; drop it
    // so the catalog shape matches user-provided bounds. This applies symmetrically to both
    // user metadata and catalog reads, so diffing converges. If a user intentionally writes a
    // `::text` cast in a bound literal it will be stripped on both sides as well.
    .replace(/('(?:[^']|'')*')::text\b/gi, '$1')
    // Strip the `00:00:00` time component so catalog round-trips (timestamp[tz] bounds formatted
    // via the session TimeZone) match user metadata that omitted the time part. Only collapse
    // when we can confidently attribute the literal to a timestamp column: either a numeric
    // offset is present (timestamptz catalog output) or an explicit `::timestamp[tz]` cast
    // follows the literal. Bare `'YYYY-MM-DD 00:00:00'` without offset/cast could just as easily
    // be a text/varchar list-partition value, and collapsing it would produce false-negative
    // diffs.
    .replace(/'(\d{4}-\d{2}-\d{2}) 00:00:00[+-]\d{2}(?::\d{2})?'/g, "'$1'")
    .replace(/'(\d{4}-\d{2}-\d{2}) 00:00:00'(?=\s*::\s*timestamp(?:tz)?(?:\s+(?:with|without)\s+time\s+zone)?\b)/gi, "'$1'");
/**
 * Remove one pair of outer parentheses, but only when they wrap the ENTIRE trimmed value
 * (i.e. the opening paren's match is the final character); otherwise return unchanged.
 */
const unwrapOuterParentheses = (value) => {
    const trimmed = value.trim();
    if (!trimmed.startsWith('(') || !trimmed.endsWith(')')) {
        return trimmed;
    }
    if (findMatchingParenthesis(trimmed, 0) !== trimmed.length - 1) {
        return trimmed;
    }
    return trimmed.slice(1, -1).trim();
};
// Repeatedly unwrap until no full-width outer pair remains, e.g. `((a, b))` -> `a, b`.
const unwrapAllOuterParentheses = (value) => {
    let current = value.trim();
    while (current.startsWith('(')) {
        const unwrapped = unwrapOuterParentheses(current);
        if (unwrapped === current) {
            break;
        }
        current = unwrapped;
    }
    return current;
};
/**
 * Canonicalize a partition SQL fragment: normalize literals and whitespace, drop identifier
 * quoting, and recursively reduce each parenthesized group to exactly one pair of parens.
 */
const normalizePartitionSqlFragment = (value) => {
    const normalized = stripDoubleQuotes(normalizeWhitespace(normalizePartitionLiterals(value)));
    let ret = '';
    for (let i = 0; i < normalized.length; i++) {
        if (normalized[i] === "'") {
            // Copy quoted literals verbatim — they must not be re-normalized.
            const end = skipQuotedLiteral(normalized, i);
            ret += normalized.slice(i, end + 1);
            i = end;
            continue;
        }
        if (normalized[i] === '(') {
            const end = findMatchingParenthesis(normalized, i);
            if (end === -1) {
                // Unbalanced input — keep the remaining tail as-is rather than guessing.
                ret += normalized.slice(i);
                break;
            }
            // Recurse into the group so nested redundant parens collapse too.
            const inner = unwrapAllOuterParentheses(normalizePartitionSqlFragment(normalized.slice(i + 1, end)));
            ret += `(${inner})`;
            i = end;
            continue;
        }
        ret += normalized[i];
    }
    return normalizeWhitespace(unwrapAllOuterParentheses(ret));
};
126
/**
 * Strip one pair of surrounding double quotes from an identifier and fold escaped `""`
 * back to `"`. Unquoted input is only trimmed.
 */
const unquoteIdentifier = (value) => {
    const id = value.trim();
    const isQuoted = id.length >= 2 && id.startsWith('"') && id.endsWith('"');
    if (!isQuoted) {
        return id;
    }
    return id.slice(1, -1).split('""').join('"');
};
/**
 * Split a user-supplied partition name into `{ schema, name }`. Supports bare (`child`),
 * schema-qualified (`schema.child`), and quoted (`"my.schema"."child"`) forms. Dots inside
 * double-quoted identifiers are part of the identifier and do not split.
 */
const splitPartitionName = (name) => {
    let insideQuotes = false;
    for (let i = 0; i < name.length; i++) {
        const ch = name[i];
        if (ch === '"') {
            if (name[i + 1] === '"') {
                i++; // escaped `""` — never opens or closes quoting
            } else {
                insideQuotes = !insideQuotes;
            }
            continue;
        }
        if (ch === '.' && !insideQuotes) {
            return {
                schema: unquoteIdentifier(name.slice(0, i)),
                name: unquoteIdentifier(name.slice(i + 1)),
            };
        }
    }
    return { name: unquoteIdentifier(name) };
};
159
/**
 * Map one user-supplied partition key to a quoted column name. The key may be a property name
 * or, as a fallback, a raw single-column field name; double quotes in the input are ignored
 * so `"createdAt"` and `createdAt` resolve identically.
 * @throws when the key is empty, unknown, or maps to a multi-column property.
 */
const resolvePartitionKey = (meta, key, quoteIdentifier) => {
    const trimmed = key.trim().replaceAll('"', '');
    if (!trimmed) {
        throw new Error(`Entity ${meta.className} has invalid partitionBy option: empty partition key`);
    }
    // Prefer property-name lookup on the root metadata; fall back to any property whose single
    // backing column matches, so column names can be used directly as well.
    const prop = meta.root.properties[trimmed] ??
        Object.values(meta.root.properties).find(candidate => candidate.fieldNames?.length === 1 && candidate.fieldNames[0] === trimmed);
    if (!prop) {
        throw new Error(`Entity ${meta.className} has invalid partitionBy option: unknown partition key '${key.trim()}'`);
    }
    if (prop.fieldNames?.length !== 1) {
        throw new Error(`Entity ${meta.className} has invalid partitionBy option: partition key '${key.trim()}' maps to multiple columns ('${prop.fieldNames?.join("', '")}'); list them explicitly as partition keys`);
    }
    return quoteIdentifier(prop.fieldNames[0]);
};
/**
 * Resolve the partition expression to a SQL fragment. Column-reference forms (array of keys
 * or a clean comma-list of identifiers) are rewritten to the backing `fieldNames` and passed
 * through `quoteIdentifier`. The callback form and the raw-SQL fallback (anything that isn't
 * a clean identifier list, e.g. `date_trunc('day', created_at)`) are emitted verbatim — the
 * user owns identifier quoting inside a raw expression.
 */
const resolvePartitionExpression = (meta, expression, quoteIdentifier) => {
    if (typeof expression === 'function') {
        return normalizeWhitespace(expression(meta.createSchemaColumnMappingObject()));
    }
    if (Array.isArray(expression)) {
        return expression.map(key => resolvePartitionKey(meta, key, quoteIdentifier)).join(', ');
    }
    const trimmed = expression.trim();
    // NOTE(review): a falsy result from `splitCommaSeparatedIdentifiers` (defined in
    // @mikro-orm/core) is treated as "not a plain identifier list" → raw SQL passthrough.
    const keys = splitCommaSeparatedIdentifiers(trimmed);
    if (keys) {
        return keys.map(key => resolvePartitionKey(meta, key, quoteIdentifier)).join(', ');
    }
    return trimmed;
};
195
// Build the canonical `<type> (<expression>)` definition string from user metadata.
const createPartitionDefinition = (type, expression) => `${type.toLowerCase()} (${normalizeWhitespace(expression)})`;
/**
 * Canonicalize a partition definition (`RANGE (col)`, `list (a, b)`, ...) so catalog output
 * and user metadata compare equal: lowercased type keyword plus a normalized key expression.
 * @internal
 */
export function normalizePartitionDefinition(value) {
    const flattened = normalizeWhitespace(value);
    // `\w+` captures the leading type keyword; the remainder (if any) is the key expression.
    const parsed = /^(\w+)\s*(.*)$/.exec(flattened);
    const type = (parsed?.[1] ?? flattened).toLowerCase();
    const body = (parsed?.[2] ?? '').trim();
    if (!body) {
        return type;
    }
    return body.startsWith('(')
        ? `${type} (${normalizePartitionSqlFragment(unwrapAllOuterParentheses(body))})`
        : `${type} ${normalizePartitionSqlFragment(body)}`;
}
211
const PARTITION_BOUND_KEYWORDS = /\b(for values|with|in|from|to|minvalue|maxvalue|null)\b/gi;
/**
 * Canonicalize a partition bound clause so user metadata and `pg_get_expr` output compare
 * equal: `default` is lowercased, a missing `for values` prefix is added, and PG bound
 * keywords outside quoted literals are case-folded.
 * @internal
 */
export function normalizePartitionBound(value) {
    const flattened = normalizeWhitespace(value);
    if (!flattened) {
        return '';
    }
    if (/^default$/i.test(flattened)) {
        return 'default';
    }
    // Prepend `for values` if the caller passed a bare `with/in/from … to …` clause, then lowercase
    // PG bound keywords outside quoted literals (so `FROM (MINVALUE) TO ('hello TO world')` becomes
    // `from (minvalue) to ('hello TO world')` with the inner TO inside the literal preserved).
    // PG's `pg_get_expr` emits `MINVALUE`/`MAXVALUE`/`NULL` in uppercase, so case-folding them here
    // prevents a perpetual diff against user-supplied lowercase bounds.
    const withPrefix = /^for values\b/i.test(flattened) ? flattened : `for values ${flattened}`;
    const folded = mapOutsideLiterals(withPrefix, segment => segment.replace(PARTITION_BOUND_KEYWORDS, keyword => keyword.toLowerCase()));
    return normalizePartitionSqlFragment(folded);
}
230
// Bounds from user metadata go through the same canonicalization as catalog reads.
const createPartitionBound = (value) => normalizePartitionBound(value);
/**
 * Produce the child partition descriptors for hash partitioning. `partitions` is either a
 * bare count (children are named `<table>_<remainder>` in the parent's schema) or an array
 * of (optionally schema-qualified) names, one per remainder.
 */
const createHashPartitions = (tableName, tableSchema, partitions) => {
    const count = typeof partitions === 'number' ? partitions : partitions.length;
    const result = [];
    for (let remainder = 0; remainder < count; remainder++) {
        const bound = normalizePartitionBound(`with (modulus ${count}, remainder ${remainder})`);
        if (typeof partitions === 'number') {
            result.push({ name: `${tableName}_${remainder}`, schema: tableSchema, bound });
            continue;
        }
        const target = splitPartitionName(partitions[remainder]);
        result.push({ name: target.name, schema: target.schema ?? tableSchema, bound });
    }
    return result;
};
/**
 * Produce child partition descriptors for list/range partitioning from explicit metadata.
 * Unnamed partitions default to `<table>_<index>`; unqualified names inherit the parent schema.
 */
const createExplicitPartitions = (tableName, tableSchema, partitions) => partitions.map((partition, index) => {
    const target = splitPartitionName(partition.name ?? `${tableName}_${index}`);
    return {
        name: target.name,
        schema: target.schema ?? tableSchema,
        bound: createPartitionBound(partition.values),
    };
});
251
/**
 * Build the SQL-level `TablePartitioning` (canonical definition + child partition list) from
 * entity metadata, or `undefined` when the entity declares no `partitionBy` option.
 * @internal
 */
export const getTablePartitioning = (meta, tableSchema, quoteIdentifier = id => id) => {
    const partitionBy = meta.partitionBy;
    if (!partitionBy) {
        return undefined;
    }
    const expression = resolvePartitionExpression(meta, partitionBy.expression, quoteIdentifier);
    // Hash children are derived from a modulus/remainder count; list/range children carry
    // explicit bounds.
    const partitions = partitionBy.type === 'hash'
        ? createHashPartitions(meta.tableName, tableSchema, partitionBy.partitions)
        : createExplicitPartitions(meta.tableName, tableSchema, partitionBy.partitions);
    return { definition: createPartitionDefinition(partitionBy.type, expression), partitions };
};
262
/**
 * Structural comparison of two partitioning configurations after canonicalization.
 * Returns `true` when they differ, i.e. a schema diff should be reported.
 * @internal
 */
export const diffPartitioning = (from, to, defaultSchema) => {
    if (!from || !to) {
        // Both absent → unchanged; exactly one absent → partitioning was added or removed.
        return Boolean(from) !== Boolean(to);
    }
    const canonicalDefinition = (partitioning) => normalizeQuotedIdentifiers(normalizePartitionDefinition(partitioning.definition));
    if (canonicalDefinition(from) !== canonicalDefinition(to)) {
        return true;
    }
    if (from.partitions.length !== to.partitions.length) {
        return true;
    }
    // A schema equal to the platform default counts as "no schema", so `public.foo` matches `foo`.
    const schemaKey = (schema) => (schema && schema !== defaultSchema ? schema : '');
    const serialize = (partition) => `${schemaKey(partition.schema)}.${partition.name}:${normalizeQuotedIdentifiers(normalizePartitionBound(partition.bound))}`;
    // Order-insensitive: compare the sorted canonical keys of both sides.
    const left = from.partitions.map(serialize).sort();
    const right = to.partitions.map(serialize).sort();
    return JSON.stringify(left) !== JSON.stringify(right);
};
283
// Partition strategies PostgreSQL supports declaratively and that entity metadata can express.
const SUPPORTED_PARTITION_TYPES = ['hash', 'list', 'range'];
const isSupportedPartitionType = (value) => SUPPORTED_PARTITION_TYPES.includes(value);
/**
 * Convert an introspected `TablePartitioning` back into the `partitionBy` entity-metadata
 * shape. Throws for partition strategies other than hash/list/range.
 * @internal
 */
export const toEntityPartitionBy = (partitioning, parentTableName, parentSchema) => {
    if (!partitioning) {
        return undefined;
    }
    const normalizedDefinition = normalizePartitionDefinition(partitioning.definition);
    const normalizedPartitions = partitioning.partitions.map(partition => ({
        ...partition,
        bound: normalizePartitionBound(partition.bound),
    }));
    // Split the leading type keyword off of the definition without using `split(' ')`, which would
    // shatter quoted literals containing spaces. Match a bareword prefix followed by whitespace.
    const [, rawType = normalizedDefinition, rawExpression = ''] = /^(\S+)(?:\s+([\s\S]*))?$/.exec(normalizeWhitespace(normalizedDefinition)) ?? [];
    const type = rawType.toLowerCase();
    if (!isSupportedPartitionType(type)) {
        throw new Error(`Unsupported partition type '${rawType}' in definition '${partitioning.definition}'`);
    }
    const expression = unwrapOuterParentheses(rawExpression);
    // Partitions outside the parent's schema stay schema-qualified in metadata.
    const qualify = (partition) => partition.schema && partition.schema !== parentSchema ? `${partition.schema}.${partition.name}` : partition.name;
    if (type === 'hash') {
        // Collapse to a bare count when catalog names follow the default
        // `${parentTableName}_${remainder}` pattern and live in the parent's schema, or when we have
        // no parent context to compare against (backwards-compatible behavior for callers that pass
        // just the `TablePartitioning`). Otherwise preserve the explicit name array so the next DDL
        // generation reproduces the same children.
        const usesDefaultShape = parentTableName == null ||
            normalizedPartitions.every((p, i) => p.name === `${parentTableName}_${i}` && (!p.schema || p.schema === parentSchema));
        return {
            type,
            expression,
            partitions: usesDefaultShape ? normalizedPartitions.length : normalizedPartitions.map(qualify),
        };
    }
    return {
        type,
        expression,
        partitions: normalizedPartitions.map(partition => ({
            name: qualify(partition),
            // `values` stores just the bound body; the `for values` prefix is re-added on DDL
            // generation by normalizePartitionBound.
            values: partition.bound === 'default' ? 'default' : partition.bound.replace(/^for values\s+/i, ''),
        })),
    };
};
package/typings.d.ts CHANGED
@@ -123,6 +123,15 @@ export interface SqlTriggerDef {
123
123
  when?: string;
124
124
  expression?: string;
125
125
  }
126
+ export interface TablePartition {
127
+ name: string;
128
+ schema?: string;
129
+ bound: string;
130
+ }
131
+ export interface TablePartitioning {
132
+ definition: string;
133
+ partitions: TablePartition[];
134
+ }
126
135
  export interface ColumnDifference {
127
136
  oldColumnName: string;
128
137
  column: Column;
@@ -132,6 +141,10 @@ export interface ColumnDifference {
132
141
  export interface TableDifference {
133
142
  name: string;
134
143
  changedComment?: string;
144
+ changedPartitioning?: {
145
+ from?: TablePartitioning;
146
+ to?: TablePartitioning;
147
+ };
135
148
  fromTable: DatabaseTable;
136
149
  toTable: DatabaseTable;
137
150
  addedColumns: Dictionary<Column>;