@strapi/database 4.5.1 → 4.6.0-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/dialects/dialect.js +4 -0
- package/lib/dialects/mysql/constants.js +6 -0
- package/lib/dialects/mysql/database-inspector.js +37 -0
- package/lib/dialects/mysql/index.js +19 -0
- package/lib/dialects/postgresql/index.js +1 -1
- package/lib/dialects/sqlite/index.js +2 -2
- package/lib/entity-manager/regular-relations.js +109 -2
- package/lib/index.d.ts +2 -2
- package/lib/query/helpers/index.js +1 -0
- package/lib/query/helpers/streams/index.js +5 -0
- package/lib/query/helpers/streams/readable.js +174 -0
- package/lib/query/query-builder.js +11 -0
- package/lib/schema/index.d.ts +1 -1
- package/lib/utils/knex.js +10 -0
- package/package.json +3 -2
package/lib/dialects/mysql/database-inspector.js
ADDED
@@ -0,0 +1,37 @@
+'use strict';
+
+const { MARIADB, MYSQL } = require('./constants');
+
+const SQL_QUERIES = {
+  VERSION: `SELECT version() as version`,
+};
+
+class MysqlDatabaseInspector {
+  constructor(db) {
+    this.db = db;
+  }
+
+  async getInformation() {
+    let database;
+    let versionNumber;
+    try {
+      const [results] = await this.db.connection.raw(SQL_QUERIES.VERSION);
+      const versionSplit = results[0].version.split('-');
+      const databaseName = versionSplit[1];
+      versionNumber = versionSplit[0];
+      database = databaseName && databaseName.toLowerCase() === 'mariadb' ? MARIADB : MYSQL;
+    } catch (e) {
+      return {
+        database: null,
+        version: null,
+      };
+    }
+
+    return {
+      database,
+      version: versionNumber,
+    };
+  }
+}
+
+module.exports = MysqlDatabaseInspector;
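For context: SELECT version() typically returns a bare number on MySQL (e.g. 8.0.31) and a "-MariaDB"-suffixed banner on MariaDB (e.g. 10.6.11-MariaDB), which is what the split('-') above relies on. A minimal standalone sketch of that parsing; the sample banners are typical values, not taken from the diff:

'use strict';

// Standalone sketch of the detection above; 'mysql'/'mariadb' stand in for
// the MYSQL/MARIADB constants from constants.js.
const parseVersionBanner = (banner) => {
  const [version, suffix] = banner.split('-');
  const database = suffix && suffix.toLowerCase() === 'mariadb' ? 'mariadb' : 'mysql';
  return { database, version };
};

console.log(parseVersionBanner('8.0.31')); // { database: 'mysql', version: '8.0.31' }
console.log(parseVersionBanner('10.6.11-MariaDB')); // { database: 'mariadb', version: '10.6.11' }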
package/lib/dialects/mysql/index.js
CHANGED
@@ -1,13 +1,19 @@
 'use strict';
 
+const semver = require('semver');
+
 const { Dialect } = require('../dialect');
 const MysqlSchemaInspector = require('./schema-inspector');
+const MysqlDatabaseInspector = require('./database-inspector');
+const { MYSQL } = require('./constants');
 
 class MysqlDialect extends Dialect {
   constructor(db) {
     super(db);
 
     this.schemaInspector = new MysqlSchemaInspector(db);
+    this.databaseInspector = new MysqlDatabaseInspector(db);
+    this.info = null;
   }
 
   configure() {
@@ -38,6 +44,8 @@ class MysqlDialect extends Dialect {
     } catch (err) {
       // Ignore error due to lack of session permissions
     }
+
+    this.info = await this.databaseInspector.getInformation();
   }
 
   async startSchemaUpdate() {
@@ -57,6 +65,17 @@ class MysqlDialect extends Dialect {
     return true;
   }
 
+  supportsWindowFunctions() {
+    const isMysqlDB = !this.info.database || this.info.database === MYSQL;
+    const isBeforeV8 = !semver.valid(this.info.version) || semver.lt(this.info.version, '8.0.0');
+
+    if (isMysqlDB && isBeforeV8) {
+      return false;
+    }
+
+    return true;
+  }
+
   usesForeignKeys() {
     return true;
   }
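The gate treats window functions as unavailable on MySQL before 8.0 (where ROW_NUMBER was introduced) and fails safe when the inspector could not identify the server: a null database or an unparseable version both count as old MySQL, while MariaDB always passes. A sketch of the decision with the MYSQL constant inlined; the info objects are illustrative examples of what the inspector may return:

'use strict';

const semver = require('semver');

// Mirrors supportsWindowFunctions() above, with 'mysql' inlined.
const supportsWindowFunctions = (info) => {
  const isMysqlDB = !info.database || info.database === 'mysql';
  const isBeforeV8 = !semver.valid(info.version) || semver.lt(info.version, '8.0.0');
  return !(isMysqlDB && isBeforeV8);
};

console.log(supportsWindowFunctions({ database: 'mysql', version: '5.7.40' })); // false
console.log(supportsWindowFunctions({ database: 'mysql', version: '8.0.31' })); // true
console.log(supportsWindowFunctions({ database: 'mariadb', version: '10.6.11' })); // true
console.log(supportsWindowFunctions({ database: null, version: null })); // false (fails safe)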
package/lib/dialects/postgresql/index.js
CHANGED
@@ -15,7 +15,7 @@ class PostgresDialect extends Dialect {
     return true;
   }
 
-  initialize() {
+  async initialize() {
     this.db.connection.client.driver.types.setTypeParser(1082, 'text', (v) => v); // Don't cast DATE string to Date()
     this.db.connection.client.driver.types.setTypeParser(1700, 'text', parseFloat);
   }
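The only change here is making initialize() async so all dialects can be awaited uniformly; the two type parsers are unchanged. OID 1082 is Postgres's DATE type and 1700 is NUMERIC, so dates stay as plain YYYY-MM-DD strings and numerics are parsed into JS numbers. The same overrides expressed against the pg driver directly, as a sketch (assuming the pg package is available):

'use strict';

const { types } = require('pg');

// 1082 = DATE, 1700 = NUMERIC (built-in Postgres type OIDs).
types.setTypeParser(1082, 'text', (v) => v); // keep '2023-01-15' as a string
types.setTypeParser(1700, 'text', parseFloat); // '12.50' -> 12.5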
package/lib/dialects/sqlite/index.js
CHANGED
@@ -5,13 +5,13 @@ const fse = require('fs-extra');
 
 const errors = require('../../errors');
 const { Dialect } = require('../dialect');
-const
+const SqliteSchemaInspector = require('./schema-inspector');
 
 class SqliteDialect extends Dialect {
   constructor(db) {
     super(db);
 
-    this.schemaInspector = new
+    this.schemaInspector = new SqliteSchemaInspector(db);
   }
 
   configure() {
package/lib/entity-manager/regular-relations.js
CHANGED
@@ -1,6 +1,7 @@
 'use strict';
 
 const { map, isEmpty } = require('lodash/fp');
+
 const {
   isBidirectional,
   isOneToAny,
@@ -10,6 +11,7 @@ const {
   hasInverseOrderColumn,
 } = require('../metadata/relations');
 const { createQueryBuilder } = require('../query');
+const { addSchema } = require('../utils/knex');
 
 /**
  * If some relations currently exist for this oneToX relation, on the one side, this function removes them and updates the inverse order if needed.
@@ -195,6 +197,12 @@ const cleanOrderColumns = async ({ id, attribute, db, inverseRelIds, transaction
     return;
   }
 
+  // Handle databases that don't support the ROW_NUMBER window function
+  if (!strapi.db.dialect.supportsWindowFunctions()) {
+    await cleanOrderColumnsForOldDatabases({ id, attribute, db, inverseRelIds, transaction: trx });
+    return;
+  }
+
   const { joinTable } = attribute;
   const { joinColumn, inverseJoinColumn, orderColumnName, inverseOrderColumnName } = joinTable;
   const update = [];
@@ -241,7 +249,8 @@ const cleanOrderColumns = async ({ id, attribute, db, inverseRelIds, transaction
        )
        .transacting(trx);
      break;
-    default:
+    default: {
+      const joinTableName = addSchema(joinTable.name);
       await db.connection
         .raw(
           `UPDATE ?? as a
@@ -252,9 +261,10 @@ const cleanOrderColumns = async ({ id, attribute, db, inverseRelIds, transaction
             WHERE ${where.join(' OR ')}
           ) AS b
           WHERE b.id = a.id`,
-          [
+          [joinTableName, ...updateBinding, ...selectBinding, joinTableName, ...whereBinding]
         )
         .transacting(trx);
+    }
   /*
     `UPDATE :joinTable: as a
     SET :orderColumn: = b.src_order, :inverseOrderColumn: = b.inv_order
@@ -271,6 +281,103 @@ const cleanOrderColumns = async ({ id, attribute, db, inverseRelIds, transaction
   }
 };
 
+const cleanOrderColumnsForOldDatabases = async ({
+  id,
+  attribute,
+  db,
+  inverseRelIds,
+  transaction: trx,
+}) => {
+  const { joinTable } = attribute;
+  const { joinColumn, inverseJoinColumn, orderColumnName, inverseOrderColumnName } = joinTable;
+
+  const now = new Date().valueOf();
+
+  if (hasOrderColumn(attribute) && id) {
+    const tempOrderTableName = `tempOrderTableName_${now}`;
+    try {
+      await db.connection
+        .raw(
+          `
+          CREATE TEMPORARY TABLE :tempOrderTableName:
+          SELECT
+            id,
+            (
+              SELECT count(*)
+              FROM :joinTableName: b
+              WHERE a.:orderColumnName: >= b.:orderColumnName: AND a.:joinColumnName: = b.:joinColumnName: AND a.:joinColumnName: = :id
+            ) AS src_order
+          FROM :joinTableName: a`,
+          {
+            tempOrderTableName,
+            joinTableName: joinTable.name,
+            orderColumnName,
+            joinColumnName: joinColumn.name,
+            id,
+          }
+        )
+        .transacting(trx);
+      await db.connection
+        .raw(
+          `UPDATE ?? as a, (SELECT * FROM ??) AS b
+          SET ?? = b.src_order
+          WHERE a.id = b.id`,
+          [joinTable.name, tempOrderTableName, orderColumnName]
+        )
+        .transacting(trx);
+    } finally {
+      await db.connection
+        .raw(`DROP TEMPORARY TABLE IF EXISTS ??`, [tempOrderTableName])
+        .transacting(trx);
+    }
+  }
+
+  if (hasInverseOrderColumn(attribute) && !isEmpty(inverseRelIds)) {
+    const tempInvOrderTableName = `tempInvOrderTableName_${now}`;
+    try {
+      await db.connection
+        .raw(
+          `
+          CREATE TEMPORARY TABLE ??
+          SELECT
+            id,
+            (
+              SELECT count(*)
+              FROM ?? b
+              WHERE a.?? >= b.?? AND a.?? = b.?? AND a.?? IN (${inverseRelIds
+                .map(() => '?')
+                .join(', ')})
+            ) AS inv_order
+          FROM ?? a`,
+          [
+            tempInvOrderTableName,
+            joinTable.name,
+            inverseOrderColumnName,
+            inverseOrderColumnName,
+            inverseJoinColumn.name,
+            inverseJoinColumn.name,
+            inverseJoinColumn.name,
+            ...inverseRelIds,
+            joinTable.name,
+          ]
+        )
+        .transacting(trx);
+      await db.connection
+        .raw(
+          `UPDATE ?? as a, (SELECT * FROM ??) AS b
+          SET ?? = b.inv_order
+          WHERE a.id = b.id`,
+          [joinTable.name, tempInvOrderTableName, inverseOrderColumnName]
+        )
+        .transacting(trx);
+    } finally {
+      await db.connection
+        .raw(`DROP TEMPORARY TABLE IF EXISTS ??`, [tempInvOrderTableName])
+        .transacting(trx);
+    }
+  }
+};
+
 module.exports = {
   deletePreviousOneToAnyRelations,
   deletePreviousAnyToOneRelations,
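In effect, the fallback replaces the ROW_NUMBER() compaction used on newer databases with a correlated COUNT(*): a row's new order is the number of rows in its partition with an order value at or below its own. It is staged through a temporary table because MySQL 5.7 cannot update a table it is also selecting from. A sketch of the two equivalent query shapes, with hypothetical join-table and column names (articles_tags_links, article_id, tag_order):

'use strict';

// Window-function variant (MySQL >= 8, MariaDB, Postgres, SQLite):
const windowVariant = `
  SELECT id, ROW_NUMBER() OVER (PARTITION BY article_id ORDER BY tag_order) AS src_order
  FROM articles_tags_links`;

// Correlated-subquery variant used when supportsWindowFunctions() is false:
const countVariant = `
  SELECT id,
    (SELECT count(*)
     FROM articles_tags_links b
     WHERE a.tag_order >= b.tag_order AND a.article_id = b.article_id) AS src_order
  FROM articles_tags_links a`;

module.exports = { windowVariant, countVariant };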
package/lib/index.d.ts
CHANGED
@@ -31,8 +31,7 @@ type AttributeOperators<T, K extends keyof T> = {
 
 export type WhereParams<T> = {
   [K in keyof T]?: T[K] | T[K][] | AttributeOperators<T, K>;
-} &
-  LogicalOperators<T>;
+} & LogicalOperators<T>;
 
 type Sortables<T> = {
   // check sortable
@@ -158,6 +157,7 @@ export interface Database {
   lifecycles: LifecycleProvider;
   migrations: MigrationProvider;
   entityManager: EntityManager;
+  queryBuilder: any;
 
   query<T extends keyof AllTypes>(uid: T): QueryFromContentType<T>;
 }
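The WhereParams change is cosmetic (the intersection type is joined onto one line), while queryBuilder: any exposes the builder on the Database interface for the new stream() API below. For reference, a filter object matching the WhereParams shape, combining attribute operators with a logical operator (the field names are hypothetical):

'use strict';

// A WhereParams-shaped filter: per-attribute operators plus a logical $or.
const where = {
  publishedAt: { $notNull: true },
  $or: [{ title: { $contains: 'strapi' } }, { viewCount: { $gt: 100 } }],
};

module.exports = where;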
package/lib/query/helpers/streams/readable.js
ADDED
@@ -0,0 +1,174 @@
+'use strict';
+
+const { Readable } = require('stream');
+const { isFinite } = require('lodash/fp');
+
+const { applyPopulate } = require('../populate');
+const { fromRow } = require('../transform');
+
+const knexQueryDone = Symbol('knexQueryDone');
+const knexPerformingQuery = Symbol('knexPerformingQuery');
+
+class ReadableStrapiQuery extends Readable {
+  /**
+   * @param {object} options
+   * @param {ReturnType<typeof import('../../query-builder')>} options.qb The strapi query builder instance
+   * @param {string} options.uid The model uid
+   * @param {import('../../../index').Database} options.db The Database instance
+   * @param {boolean} [options.mapResults] Whether to map raw rows into entities
+   * @param {number} [options.batchSize] The maximum number of entities to fetch per query
+   */
+  constructor({ qb, db, uid, mapResults = true, batchSize = 500 }) {
+    super({ objectMode: true, highWaterMark: batchSize });
+
+    // Extract offset & limit from the query-builder's state
+    const { offset, limit } = qb.state;
+
+    // Original offset value
+    this._offset = isFinite(offset) ? offset : 0;
+
+    // Max number of entities to fetch; normalized to null when undefined
+    this._limit = isFinite(limit) ? limit : null;
+
+    // Total number of entities fetched so far
+    this._fetched = 0;
+
+    /**
+     * Original query
+     * @type {import('knex').Knex}
+     */
+    this._query = qb.getKnexQuery();
+
+    // Query Builder instance
+    this._qb = qb;
+
+    // Database related properties
+    this._db = db;
+    this._uid = uid;
+    this._meta = db.metadata.get(uid);
+
+    // Stream params
+    this._batchSize = batchSize;
+    this._mapResults = mapResults;
+
+    // States
+    this[knexPerformingQuery] = false;
+  }
+
+  _destroy(err, cb) {
+    // If the stream is destroyed while a query is being made, then wait for a
+    // knexQueryDone event to be emitted before actually destroying the stream
+    if (this[knexPerformingQuery]) {
+      this.once(knexQueryDone, (er) => cb(err || er));
+    } else {
+      cb(err);
+    }
+  }
+
+  /**
+   * Custom ._read() implementation
+   *
+   * NOTE: Here "size" means the number of entities to be read from the database,
+   * not the actual byte size, as that would mean returning partial entities.
+   *
+   * @param {number} size
+   */
+  async _read(size) {
+    const query = this._query;
+
+    // Remove the original offset & limit properties from the query
+    // Theoretically, they would be replaced by calling them again, but this is just to be sure
+    query.clear('limit').clear('offset');
+
+    // Define the maximum read size based on the limit and the requested size
+    // NOTE: size is equal to _batchSize by default. Since we want to allow customizing it on
+    // the fly, we need to use its value instead of batchSize when computing the maxReadSize value
+    const maxReadSize =
+      // if no limit is defined in the query, use the given size,
+      // otherwise, use the smallest value between the two
+      this._limit === null ? size : Math.min(size, this._limit);
+
+    // Compute the limit for the next query
+    const limit =
+      // If a limit is defined
+      this._limit !== null &&
+      // And reading `maxReadSize` would fetch too many entities (> _limit)
+      this._fetched + maxReadSize > this._limit
+        ? // Then adjust the limit so that it only gets the remaining entities
+          this._limit - this._fetched
+        : // Else, use the max read size
+          maxReadSize;
+
+    // If we don't have anything left to read (_limit === _fetched),
+    // don't bother making the query and end the stream by pushing null
+    if (limit <= 0) {
+      this.push(null);
+      return;
+    }
+
+    // Compute the offset (base offset + number of entities already fetched)
+    const offset = this._offset + this._fetched;
+
+    // Update the query with the new values (offset + limit)
+    query.offset(offset).limit(limit);
+
+    // Lock the ._destroy()
+    this[knexPerformingQuery] = true;
+
+    let results;
+    let count;
+    let err;
+
+    try {
+      // Execute the query and store the results & count
+      results = await query;
+
+      const { populate } = this._qb.state;
+
+      // Apply populate if needed
+      if (populate) {
+        await applyPopulate(results, populate, { qb: this._qb, uid: this._uid, db: this._db });
+      }
+
+      // Map results if asked to
+      if (this._mapResults) {
+        results = fromRow(this._meta, results);
+      }
+
+      count = results.length;
+    } catch (e) {
+      err = e;
+    }
+
+    // Unlock the ._destroy()
+    this[knexPerformingQuery] = false;
+
+    // Tell ._destroy() that it's now safe to close the db connection
+    if (this.destroyed) {
+      this.emit(knexQueryDone);
+      return;
+    }
+
+    // If there is an error, destroy with the given error
+    if (err) {
+      this.destroy(err);
+      return;
+    }
+
+    // Update the number of fetched entities
+    this._fetched += count;
+
+    // Push each fetched entity into the stream
+    for (const result of results) {
+      this.push(result);
+    }
+
+    // If the limit has been reached or the last batch came back short,
+    // push null to close the stream
+    if (this._fetched === this._limit || count < this._batchSize) {
+      this.push(null);
+    }
+  }
+}
+
+module.exports = ReadableStrapiQuery;
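To make the paging arithmetic concrete: every _read() re-issues the underlying select with a sliding offset/limit window, and the final window shrinks so the stream never overshoots a user-supplied limit. A pure re-implementation of just that window computation, using the same defaults (batchSize = 500):

'use strict';

// Mirrors the offset/limit computation in _read() above.
const nextWindow = ({ baseOffset, limit, fetched, size }) => {
  const maxReadSize = limit === null ? size : Math.min(size, limit);
  const nextLimit =
    limit !== null && fetched + maxReadSize > limit ? limit - fetched : maxReadSize;
  if (nextLimit <= 0) return null; // nothing left: the stream pushes null and ends
  return { offset: baseOffset + fetched, limit: nextLimit };
};

// A query with offset 10 and limit 1200, read in batches of 500:
console.log(nextWindow({ baseOffset: 10, limit: 1200, fetched: 0, size: 500 })); // { offset: 10, limit: 500 }
console.log(nextWindow({ baseOffset: 10, limit: 1200, fetched: 1000, size: 500 })); // { offset: 1010, limit: 200 }
console.log(nextWindow({ baseOffset: 10, limit: 1200, fetched: 1200, size: 500 })); // null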
package/lib/query/query-builder.js
CHANGED
@@ -2,6 +2,7 @@
 
 const _ = require('lodash/fp');
 
+const { DatabaseError } = require('../errors');
 const helpers = require('./helpers');
 
 const createQueryBuilder = (uid, db, initialState = {}) => {
@@ -488,6 +489,16 @@ const createQueryBuilder = (uid, db, initialState = {}) => {
         db.dialect.transformErrors(error);
       }
     },
+
+    stream({ mapResults = true } = {}) {
+      if (state.type === 'select') {
+        return new helpers.ReadableQuery({ qb: this, db, uid, mapResults });
+      }
+
+      throw new DatabaseError(
+        `query-builder.stream() has been called with an unsupported query type: "${state.type}"`
+      );
+    },
   };
 };
 
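Since ReadableStrapiQuery is an object-mode Readable, the value returned by stream() can be consumed with for await. A hypothetical consumer sketch; the init() call and the uid are assumptions based on the existing 4.x query-builder surface, not shown in this diff:

'use strict';

// Hypothetical: iterate over published articles without loading them all at once.
async function exportArticles(db) {
  const stream = db
    .queryBuilder('api::article.article') // uid is illustrative
    .init({ where: { publishedAt: { $notNull: true } } })
    .stream({ mapResults: true });

  for await (const entity of stream) {
    process.stdout.write(`${JSON.stringify(entity)}\n`);
  }
}

module.exports = exportArticles;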
package/lib/schema/index.d.ts
CHANGED
package/lib/utils/knex.js
CHANGED
@@ -7,6 +7,16 @@ const isKnexQuery = (value) => {
   return value instanceof KnexBuilder || value instanceof KnexRaw;
 };
 
+/**
+ * Adds the name of the schema to the table name if the schema was defined by the user.
+ * Users can set the db schema only for Postgres in strapi database config.
+ */
+const addSchema = (tableName) => {
+  const schemaName = strapi.db.connection.getSchemaName();
+  return schemaName ? `${schemaName}.${tableName}` : tableName;
+};
+
 module.exports = {
   isKnexQuery,
+  addSchema,
 };
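Usage note: with a Postgres connection configured with, say, schema: 'content', addSchema('articles_tags_links') yields 'content.articles_tags_links', which is what lets the raw UPDATE in regular-relations.js resolve against non-default schemas; on other dialects getSchemaName() is empty and the table name passes through unchanged. A sketch of that pass-through, with the schema passed in explicitly and illustrative values:

'use strict';

// Same shape as addSchema() above, minus the global strapi lookup.
const addSchema = (tableName, schemaName) =>
  schemaName ? `${schemaName}.${tableName}` : tableName;

console.log(addSchema('articles_tags_links', 'content')); // 'content.articles_tags_links'
console.log(addSchema('articles_tags_links', undefined)); // 'articles_tags_links'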
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@strapi/database",
-  "version": "4.5.1",
+  "version": "4.6.0-alpha.0",
   "description": "Strapi's database layer",
   "homepage": "https://strapi.io",
   "bugs": {
@@ -36,11 +36,12 @@
     "fs-extra": "10.0.0",
     "knex": "1.0.7",
     "lodash": "4.17.21",
+    "semver": "7.3.8",
     "umzug": "3.1.1"
   },
   "engines": {
     "node": ">=14.19.1 <=18.x.x",
     "npm": ">=6.0.0"
   },
-  "gitHead": "
+  "gitHead": "b7a87dcffc6f44e18eedef92e354096ffe32ce0c"
 }