@strapi/database 4.6.0-alpha.1 → 4.6.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,85 @@
+ 'use strict';
+
+ const relationsOrderer = require('../relations-orderer');
+
+ describe('relations orderer', () => {
+   test('connect at the end', () => {
+     const orderer = relationsOrderer(
+       [
+         { id: 2, order: 4 },
+         { id: 3, order: 10 },
+       ],
+       'id',
+       'order'
+     );
+
+     orderer.connect([{ id: 4, position: { end: true } }, { id: 5 }]);
+
+     expect(orderer.get()).toMatchObject([
+       { id: 2, order: 4 },
+       { id: 3, order: 10 },
+       { id: 4, order: 10.5 },
+       { id: 5, order: 10.5 },
+     ]);
+   });
+
+   test('connect at the start', () => {
+     const orderer = relationsOrderer(
+       [
+         { id: 2, order: 4 },
+         { id: 3, order: 10 },
+       ],
+       'id',
+       'order'
+     );
+
+     orderer.connect([{ id: 4, position: { start: true } }]);
+
+     expect(orderer.get()).toMatchObject([
+       { id: 4, order: 0.5 },
+       { id: 2, order: 4 },
+       { id: 3, order: 10 },
+     ]);
+   });
+
+   test('connect multiple relations', () => {
+     const orderer = relationsOrderer(
+       [
+         { id: 2, order: 4 },
+         { id: 3, order: 10 },
+       ],
+       'id',
+       'order'
+     );
+
+     orderer.connect([
+       { id: 4, position: { before: 2 } },
+       { id: 4, position: { before: 3 } },
+       { id: 5, position: { before: 4 } },
+     ]);
+
+     expect(orderer.get()).toMatchObject([
+       { id: 2, order: 4 },
+       { id: 5, order: 9.5 },
+       { id: 4, order: 9.5 },
+       { id: 3, order: 10 },
+     ]);
+   });
+
+   test('connect with no initial relations', () => {
+     const orderer = relationsOrderer([], 'id', 'order');
+
+     orderer.connect([
+       { id: 1, position: { start: true } },
+       { id: 2, position: { start: true } },
+       { id: 3, position: { after: 1 } },
+       { id: 1, position: { after: 2 } },
+     ]);
+
+     expect(orderer.get()).toMatchObject([
+       { id: 2, order: 0.5 },
+       { id: 1, order: 0.5 },
+       { id: 3, order: 0.5 },
+     ]);
+   });
+ });
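The assertions above stop at get(), where freshly connected relations can still share the same fractional order (the two 10.5 values). The entity-manager code later in this diff feeds that result into getOrderMap(), which spreads ties into distinct fractions before the order column is written. A rough trace of the first test case, assuming the module behaves as published here; the printed values are approximate:

    const relationsOrderer = require('../relations-orderer'); // same module as in the test above

    const orderMap = relationsOrderer(
      [
        { id: 2, order: 4 },
        { id: 3, order: 10 },
      ],
      'id',
      'order'
    )
      .connect([{ id: 4, position: { end: true } }, { id: 5 }])
      .getOrderMap();

    // Pre-existing rows are skipped; the two new rows share floor(10.5) = 10 and are
    // spread as (idx + 1) / (count + 1), giving roughly { 4: 10.33, 5: 10.67 }.
    console.log(orderMap);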
@@ -3,6 +3,7 @@
  const {
    isUndefined,
    castArray,
+   compact,
    isNil,
    has,
    isString,
@@ -18,6 +19,7 @@ const {
    isNumber,
    map,
    difference,
+   uniqBy,
  } = require('lodash/fp');
  const types = require('../types');
  const { createField } = require('../fields');
@@ -37,6 +39,7 @@ const {
    deleteRelations,
    cleanOrderColumns,
  } = require('./regular-relations');
+ const relationsOrderer = require('./relations-orderer');

  const toId = (value) => value.id || value;
  const toIds = (value) => castArray(value || []).map(toId);
@@ -75,7 +78,10 @@ const toAssocs = (data) => {
    }

    return {
-     connect: toIdArray(data?.connect),
+     connect: toIdArray(data?.connect).map((elm) => ({
+       id: elm.id,
+       position: elm.position ? elm.position : { end: true },
+     })),
      disconnect: toIdArray(data?.disconnect),
    };
  };
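In practice the normalization above means relation connects can now carry a position, while plain { id } entries keep the old append behaviour. A rough illustration through the public query engine API; the content-type UID and the categories field are made up:

    await strapi.db.query('api::article.article').update({
      where: { id: 1 },
      data: {
        categories: {
          // each entry is normalized to { id, position }, defaulting to { end: true }
          connect: [
            { id: 4, position: { before: 2 } }, // place relation 4 right before relation 2
            { id: 7 }, // no position given, treated as { end: true }
          ],
          disconnect: [{ id: 9 }],
        },
      },
    });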
@@ -211,22 +217,19 @@ const createEntityManager = (db) => {
  }

  const dataToInsert = processData(metadata, data, { withDefaults: true });
- let id;

- const trx = await strapi.db.transaction();
- try {
-   const res = await this.createQueryBuilder(uid)
-     .insert(dataToInsert)
-     .transacting(trx)
-     .execute();
+ const res = await this.createQueryBuilder(uid).insert(dataToInsert).execute();

- id = res[0].id || res[0];
+ const id = res[0].id || res[0];

+ const trx = await strapi.db.transaction();
+ try {
    await this.attachRelations(uid, id, data, { transaction: trx });

    await trx.commit();
  } catch (e) {
    await trx.rollback();
+   await this.createQueryBuilder(uid).where({ id }).delete().execute();
    throw e;
  }

@@ -285,7 +288,11 @@ const createEntityManager = (db) => {
    throw new Error('Update requires a where parameter');
  }

- const entity = await this.createQueryBuilder(uid).select('id').where(where).first().execute();
+ const entity = await this.createQueryBuilder(uid)
+   .select('*')
+   .where(where)
+   .first()
+   .execute({ mapResults: false });

  if (!entity) {
    return null;
@@ -293,23 +300,19 @@

  const { id } = entity;

- const trx = await strapi.db.transaction();
- try {
-   const dataToUpdate = processData(metadata, data);
+ const dataToUpdate = processData(metadata, data);

-   if (!isEmpty(dataToUpdate)) {
-     await this.createQueryBuilder(uid)
-       .where({ id })
-       .update(dataToUpdate)
-       .transacting(trx)
-       .execute();
-   }
+ if (!isEmpty(dataToUpdate)) {
+   await this.createQueryBuilder(uid).where({ id }).update(dataToUpdate).execute();
+ }

+ const trx = await strapi.db.transaction();
+ try {
    await this.updateRelations(uid, id, data, { transaction: trx });
-
    await trx.commit();
  } catch (e) {
    await trx.rollback();
+   await this.createQueryBuilder(uid).where({ id }).update(entity).execute();
    throw e;
  }

@@ -372,10 +375,10 @@ const createEntityManager = (db) => {

  const { id } = entity;

+ await this.createQueryBuilder(uid).where({ id }).delete().execute();
+
  const trx = await strapi.db.transaction();
  try {
-   await this.createQueryBuilder(uid).where({ id }).delete().transacting(trx).execute();
-
    await this.deleteRelations(uid, id, { transaction: trx });

    await trx.commit();
@@ -564,7 +567,7 @@ const createEntityManager = (db) => {
  }

  // prepare new relations to insert
- const insert = relsToAdd.map((data) => {
+ const insert = uniqBy('id', relsToAdd).map((data) => {
    return {
      [joinColumn.name]: id,
      [inverseJoinColumn.name]: data.id,
@@ -574,11 +577,23 @@
  });

  // add order value
- if (hasOrderColumn(attribute)) {
-   insert.forEach((rel, idx) => {
-     rel[orderColumnName] = idx + 1;
+ if (cleanRelationData.set && hasOrderColumn(attribute)) {
+   insert.forEach((data, idx) => {
+     data[orderColumnName] = idx + 1;
+   });
+ } else if (cleanRelationData.connect && hasOrderColumn(attribute)) {
+   // use position attributes to calculate order
+   const orderMap = relationsOrderer([], inverseJoinColumn.name, joinTable.orderColumnName)
+     .connect(relsToAdd)
+     .get()
+     // set the order based on the order of the ids
+     .reduce((acc, rel, idx) => ({ ...acc, [rel.id]: idx }), {});
+
+   insert.forEach((row) => {
+     row[orderColumnName] = orderMap[row[inverseJoinColumn.name]];
    });
  }
+
  // add inv_order value
  if (hasInverseOrderColumn(attribute)) {
    const maxResults = await db
@@ -818,27 +833,53 @@
  }

  // prepare relations to insert
- const insert = cleanRelationData.connect.map((relToAdd) => ({
+ const insert = uniqBy('id', cleanRelationData.connect).map((relToAdd) => ({
    [joinColumn.name]: id,
    [inverseJoinColumn.name]: relToAdd.id,
    ...(joinTable.on || {}),
    ...(relToAdd.__pivot || {}),
  }));

- // add order value
  if (hasOrderColumn(attribute)) {
-   const orderMax = (
-     await this.createQueryBuilder(joinTable.name)
-       .max(orderColumnName)
-       .where({ [joinColumn.name]: id })
-       .where(joinTable.on || {})
-       .first()
-       .transacting(trx)
-       .execute()
-   ).max;
+   // Get all adjacent relations and the one with the highest order
+   const adjacentRelations = await this.createQueryBuilder(joinTable.name)
+     .where({
+       $or: [
+         {
+           [joinColumn.name]: id,
+           [inverseJoinColumn.name]: {
+             $in: compact(
+               cleanRelationData.connect.map(
+                 (r) => r.position?.after || r.position?.before
+               )
+             ),
+           },
+         },
+         {
+           [joinColumn.name]: id,
+           [orderColumnName]: this.createQueryBuilder(joinTable.name)
+             .max(orderColumnName)
+             .where({ [joinColumn.name]: id })
+             .where(joinTable.on || {})
+             .transacting(trx)
+             .getKnexQuery(),
+         },
+       ],
+     })
+     .where(joinTable.on || {})
+     .transacting(trx)
+     .execute();

-   insert.forEach((row, idx) => {
-     row[orderColumnName] = orderMax + idx + 1;
+   const orderMap = relationsOrderer(
+     adjacentRelations,
+     inverseJoinColumn.name,
+     joinTable.orderColumnName
+   )
+     .connect(cleanRelationData.connect)
+     .getOrderMap();
+
+   insert.forEach((row) => {
+     row[orderColumnName] = orderMap[row[inverseJoinColumn.name]];
    });
  }

@@ -904,7 +945,7 @@
    continue;
  }

- const insert = cleanRelationData.set.map((relToAdd) => ({
+ const insert = uniqBy('id', cleanRelationData.set).map((relToAdd) => ({
    [joinColumn.name]: id,
    [inverseJoinColumn.name]: relToAdd.id,
    ...(joinTable.on || {}),
@@ -1,6 +1,7 @@
  'use strict';

  const { map, isEmpty } = require('lodash/fp');
+ const { randomBytes } = require('crypto');

  const {
    isBidirectional,
@@ -197,60 +198,42 @@ const cleanOrderColumns = async ({ id, attribute, db, inverseRelIds, transaction
    return;
  }

- // Handle databases that don't support window function ROW_NUMBER
- if (!strapi.db.dialect.supportsWindowFunctions()) {
-   await cleanOrderColumnsForOldDatabases({ id, attribute, db, inverseRelIds, transaction: trx });
-   return;
- }
-
- const { joinTable } = attribute;
- const { joinColumn, inverseJoinColumn, orderColumnName, inverseOrderColumnName } = joinTable;
- const update = [];
- const updateBinding = [];
- const select = ['??'];
- const selectBinding = ['id'];
- const where = [];
- const whereBinding = [];
-
- if (hasOrderColumn(attribute) && id) {
-   update.push('?? = b.src_order');
-   updateBinding.push(orderColumnName);
-   select.push('ROW_NUMBER() OVER (PARTITION BY ?? ORDER BY ??) AS src_order');
-   selectBinding.push(joinColumn.name, orderColumnName);
-   where.push('?? = ?');
-   whereBinding.push(joinColumn.name, id);
- }
-
- if (hasInverseOrderColumn(attribute) && !isEmpty(inverseRelIds)) {
-   update.push('?? = b.inv_order');
-   updateBinding.push(inverseOrderColumnName);
-   select.push('ROW_NUMBER() OVER (PARTITION BY ?? ORDER BY ??) AS inv_order');
-   selectBinding.push(inverseJoinColumn.name, inverseOrderColumnName);
-   where.push(`?? IN (${inverseRelIds.map(() => '?').join(', ')})`);
-   whereBinding.push(inverseJoinColumn.name, ...inverseRelIds);
- }
-
- // raw query as knex doesn't allow updating from a subquery
- // https://github.com/knex/knex/issues/2504
  switch (strapi.db.dialect.client) {
    case 'mysql':
-     await db.connection
-       .raw(
-         `UPDATE
-           ?? as a,
-           (
-             SELECT ${select.join(', ')}
-             FROM ??
-             WHERE ${where.join(' OR ')}
-           ) AS b
-           SET ${update.join(', ')}
-           WHERE b.id = a.id`,
-         [joinTable.name, ...selectBinding, joinTable.name, ...whereBinding, ...updateBinding]
-       )
-       .transacting(trx);
+     await cleanOrderColumnsForInnoDB({ id, attribute, db, inverseRelIds, transaction: trx });
      break;
    default: {
+     const { joinTable } = attribute;
+     const { joinColumn, inverseJoinColumn, orderColumnName, inverseOrderColumnName } = joinTable;
+     const update = [];
+     const updateBinding = [];
+     const select = ['??'];
+     const selectBinding = ['id'];
+     const where = [];
+     const whereBinding = [];
+
+     if (hasOrderColumn(attribute) && id) {
+       update.push('?? = b.src_order');
+       updateBinding.push(orderColumnName);
+       select.push('ROW_NUMBER() OVER (PARTITION BY ?? ORDER BY ??) AS src_order');
+       selectBinding.push(joinColumn.name, orderColumnName);
+       where.push('?? = ?');
+       whereBinding.push(joinColumn.name, id);
+     }
+
+     if (hasInverseOrderColumn(attribute) && !isEmpty(inverseRelIds)) {
+       update.push('?? = b.inv_order');
+       updateBinding.push(inverseOrderColumnName);
+       select.push('ROW_NUMBER() OVER (PARTITION BY ?? ORDER BY ??) AS inv_order');
+       selectBinding.push(inverseJoinColumn.name, inverseOrderColumnName);
+       where.push(`?? IN (${inverseRelIds.map(() => '?').join(', ')})`);
+       whereBinding.push(inverseJoinColumn.name, ...inverseRelIds);
+     }
+
      const joinTableName = addSchema(joinTable.name);
+
+     // raw query as knex doesn't allow updating from a subquery
+     // https://github.com/knex/knex/issues/2504
      await db.connection
        .raw(
          `UPDATE ?? as a
@@ -264,24 +247,29 @@ const cleanOrderColumns = async ({ id, attribute, db, inverseRelIds, transaction
          [joinTableName, ...updateBinding, ...selectBinding, joinTableName, ...whereBinding]
        )
        .transacting(trx);
+
+     /*
+       `UPDATE :joinTable: as a
+         SET :orderColumn: = b.src_order, :inverseOrderColumn: = b.inv_order
+         FROM (
+           SELECT
+             id,
+             ROW_NUMBER() OVER ( PARTITION BY :joinColumn: ORDER BY :orderColumn:) AS src_order,
+             ROW_NUMBER() OVER ( PARTITION BY :inverseJoinColumn: ORDER BY :inverseOrderColumn:) AS inv_order
+           FROM :joinTable:
+           WHERE :joinColumn: = :id OR :inverseJoinColumn: IN (:inverseRelIds)
+         ) AS b
+         WHERE b.id = a.id`,
+     */
    }
-   /*
-     `UPDATE :joinTable: as a
-       SET :orderColumn: = b.src_order, :inverseOrderColumn: = b.inv_order
-       FROM (
-         SELECT
-           id,
-           ROW_NUMBER() OVER ( PARTITION BY :joinColumn: ORDER BY :orderColumn:) AS src_order,
-           ROW_NUMBER() OVER ( PARTITION BY :inverseJoinColumn: ORDER BY :inverseOrderColumn:) AS inv_order
-         FROM :joinTable:
-         WHERE :joinColumn: = :id OR :inverseJoinColumn: IN (:inverseRelIds)
-       ) AS b
-       WHERE b.id = a.id`,
-   */
  }
};

- const cleanOrderColumnsForOldDatabases = async ({
+ /*
+  * Ensure that orders are following a 1, 2, 3 sequence, without gap.
+  * The use of a temporary table instead of a window function makes the query compatible with MySQL 5 and prevents some deadlocks to happen in innoDB databases
+  */
+ const cleanOrderColumnsForInnoDB = async ({
    id,
    attribute,
    db,
@@ -292,14 +280,15 @@ const cleanOrderColumnsForOldDatabases = async ({
  const { joinColumn, inverseJoinColumn, orderColumnName, inverseOrderColumnName } = joinTable;

  const now = new Date().valueOf();
+ const randomHex = randomBytes(16).toString('hex');

  if (hasOrderColumn(attribute) && id) {
-   const tempOrderTableName = `tempOrderTableName_${now}`;
+   const tempOrderTableName = `orderTable_${now}_${randomHex}`;
    try {
      await db.connection
        .raw(
          `
-         CREATE TEMPORARY TABLE :tempOrderTableName:
+         CREATE TABLE :tempOrderTableName:
          SELECT
            id,
            (
@@ -317,6 +306,9 @@ const cleanOrderColumnsForOldDatabases = async ({
          }
        )
        .transacting(trx);
+
+     // raw query as knex doesn't allow updating from a subquery
+     // https://github.com/knex/knex/issues/2504
      await db.connection
        .raw(
          `UPDATE ?? as a, (SELECT * FROM ??) AS b
@@ -326,19 +318,17 @@
        )
        .transacting(trx);
    } finally {
-     await db.connection
-       .raw(`DROP TEMPORARY TABLE IF EXISTS ??`, [tempOrderTableName])
-       .transacting(trx);
+     await db.connection.raw(`DROP TABLE IF EXISTS ??`, [tempOrderTableName]).transacting(trx);
    }
  }

  if (hasInverseOrderColumn(attribute) && !isEmpty(inverseRelIds)) {
-   const tempInvOrderTableName = `tempInvOrderTableName_${now}`;
+   const tempInvOrderTableName = `invOrderTable_${now}_${randomHex}`;
    try {
      await db.connection
        .raw(
          `
-         CREATE TEMPORARY TABLE ??
+         CREATE TABLE ??
          SELECT
            id,
            (
@@ -371,9 +361,7 @@ const cleanOrderColumnsForOldDatabases = async ({
        )
        .transacting(trx);
    } finally {
-     await db.connection
-       .raw(`DROP TEMPORARY TABLE IF EXISTS ??`, [tempInvOrderTableName])
-       .transacting(trx);
+     await db.connection.raw(`DROP TABLE IF EXISTS ??`, [tempInvOrderTableName]).transacting(trx);
    }
  }
};
  };
@@ -0,0 +1,126 @@
+ 'use strict';
+
+ const _ = require('lodash/fp');
+
+ /**
+  * Responsible for calculating the relations order when connecting them.
+  *
+  * The connect method takes an array of relations with positional attributes:
+  * - before: the id of the relation to connect before
+  * - after: the id of the relation to connect after
+  * - end: it should be at the end
+  * - start: it should be at the start
+  *
+  * Example:
+  *  - Having a connect array like:
+  *      [ { id: 4, before: 2 }, { id: 4, before: 3}, {id: 5, before: 4} ]
+  *  - With the initial relations:
+  *      [ { id: 2, order: 4 }, { id: 3, order: 10 } ]
+  *  - Step by step, going through the connect array, the array of relations would be:
+  *      [ { id: 4, order: 3.5 }, { id: 2, order: 4 }, { id: 3, order: 10 } ]
+  *      [ { id: 2, order: 4 }, { id: 4, order: 3.5 }, { id: 3, order: 10 } ]
+  *      [ { id: 2, order: 4 }, { id: 5, order: 3.5 }, { id: 4, order: 3.5 }, { id: 3, order: 10 } ]
+  *  - The final step would be to recalculate fractional order values.
+  *      [ { id: 2, order: 4 }, { id: 5, order: 3.33 }, { id: 4, order: 3.66 }, { id: 3, order: 10 } ]
+  *
+  * Constraints:
+  * - Expects you will never connect a relation before / after one that does not exist
+  * - Expect initArr to have all relations referenced in the positional attributes
+  *
+  * @param {Array<*>} initArr - array of relations to initialize the class with
+  * @param {string} idColumn - the column name of the id
+  * @param {string} orderColumn - the column name of the order
+  * @return {*}
+  */
+ const relationsOrderer = (initArr, idColumn, orderColumn) => {
+   const arr = _.castArray(initArr || []).map((r) => ({
+     init: true,
+     id: r[idColumn],
+     order: r[orderColumn],
+   }));
+
+   const maxOrder = _.maxBy('order', arr)?.order || 0;
+
+   // TODO: Improve performance by using a map
+   const findRelation = (id) => {
+     const idx = arr.findIndex((r) => r.id === id);
+     return { idx, relation: arr[idx] };
+   };
+
+   const removeRelation = (r) => {
+     const { idx } = findRelation(r.id);
+     if (idx >= 0) {
+       arr.splice(idx, 1);
+     }
+   };
+
+   const insertRelation = (r) => {
+     let idx;
+
+     if (r.position?.before) {
+       const { idx: _idx, relation } = findRelation(r.position.before);
+       if (relation.init) r.order = relation.order - 0.5;
+       else r.order = relation.order;
+       idx = _idx;
+     } else if (r.position?.after) {
+       const { idx: _idx, relation } = findRelation(r.position.after);
+       if (relation.init) r.order = relation.order + 0.5;
+       else r.order = relation.order;
+       idx = _idx + 1;
+     } else if (r.position?.start) {
+       r.order = 0.5;
+       idx = 0;
+     } else {
+       r.order = maxOrder + 0.5;
+       idx = arr.length;
+     }
+
+     // Insert the relation in the array
+     arr.splice(idx, 0, r);
+   };
+
+   return {
+     disconnect(relations) {
+       _.castArray(relations).forEach((relation) => {
+         removeRelation(relation);
+       });
+       return this;
+     },
+     connect(relations) {
+       _.castArray(relations).forEach((relation) => {
+         this.disconnect(relation);
+
+         try {
+           insertRelation(relation);
+         } catch (err) {
+           strapi.log.error(err);
+           throw new Error(
+             `Could not connect ${relation.id}, position ${JSON.stringify(
+               relation.position
+             )} is invalid`
+           );
+         }
+       });
+       return this;
+     },
+     get() {
+       return arr;
+     },
+     /**
+      * Get a map between the relation id and its order
+      */
+     getOrderMap() {
+       return _(arr)
+         .groupBy('order')
+         .reduce((acc, relations) => {
+           if (relations[0]?.init) return acc;
+           relations.forEach((relation, idx) => {
+             acc[relation.id] = Math.floor(relation.order) + (idx + 1) / (relations.length + 1);
+           });
+           return acc;
+         }, {});
+     },
+   };
+ };
+
+ module.exports = relationsOrderer;
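A small standalone usage sketch of the module above (not part of the diff). It covers the one operation the tests skip, disconnect, and shows how getOrderMap() turns the fractional placeholder into the value written to the order column; the ids and orders are made up:

    const relationsOrderer = require('./relations-orderer');

    const orderer = relationsOrderer(
      [
        { id: 2, order: 1 },
        { id: 3, order: 2 },
        { id: 9, order: 3 },
      ],
      'id',
      'order'
    );

    orderer
      .disconnect([{ id: 9 }]) // drop the existing relation with id 9
      .connect([{ id: 9, position: { after: 2 } }]); // re-attach it right after id 2

    orderer.get();
    // -> roughly [{ id: 2, order: 1 }, { id: 9, order: 1.5 }, { id: 3, order: 2 }]

    orderer.getOrderMap();
    // -> { 9: 1.5 }; only the re-connected row is renumbered, untouched rows keep their order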
package/lib/index.d.ts CHANGED
@@ -1,3 +1,4 @@
+ import { Knex } from 'knex';
  import { LifecycleProvider } from './lifecycles';
  import { MigrationProvider } from './migrations';
  import { SchemaProvider } from './schema';
@@ -31,8 +32,7 @@ type AttributeOperators<T, K extends keyof T> = {

  export type WhereParams<T> = {
    [K in keyof T]?: T[K] | T[K][] | AttributeOperators<T, K>;
- } &
-   LogicalOperators<T>;
+ } & LogicalOperators<T>;

  type Sortables<T> = {
    // check sortable
@@ -158,6 +158,9 @@ export interface Database {
    lifecycles: LifecycleProvider;
    migrations: MigrationProvider;
    entityManager: EntityManager;
+   queryBuilder: any;
+   metadata: any;
+   connection: Knex;

    query<T extends keyof AllTypes>(uid: T): QueryFromContentType<T>;
  }
@@ -136,7 +136,7 @@ const createCompoLinkModelMeta = (baseModelMeta) => {
      type: 'string',
    },
    order: {
-     type: 'integer',
+     type: 'float',
      column: {
        unsigned: true,
        defaultTo: null,
@@ -231,7 +231,7 @@ const createMorphToMany = (attributeName, attribute, meta, metadata) => {
      type: 'string',
    },
    order: {
-     type: 'integer',
+     type: 'float',
      column: {
        unsigned: true,
      },
@@ -485,7 +485,7 @@ const createJoinTable = (metadata, { attributeName, attribute, meta }) => {
    // order
    if (isAnyToMany(attribute)) {
      metadataSchema.attributes[orderColumnName] = {
-       type: 'integer',
+       type: 'float',
        column: {
          unsigned: true,
          defaultTo: null,
@@ -502,7 +502,7 @@ const createJoinTable = (metadata, { attributeName, attribute, meta }) => {
    // inv order
    if (isBidirectional(attribute) && isManyToAny(attribute)) {
      metadataSchema.attributes[inverseOrderColumnName] = {
-       type: 'integer',
+       type: 'float',
        column: {
          unsigned: true,
          defaultTo: null,
@@ -7,4 +7,5 @@ module.exports = {
    ...require('./populate'),
    ...require('./where'),
    ...require('./transform'),
+   ...require('./streams'),
  };
@@ -0,0 +1,5 @@
+ 'use strict';
+
+ module.exports = {
+   ReadableQuery: require('./readable'),
+ };
@@ -0,0 +1,174 @@
+ 'use strict';
+
+ const { Readable } = require('stream');
+ const { isFinite } = require('lodash/fp');
+
+ const { applyPopulate } = require('../populate');
+ const { fromRow } = require('../transform');
+
+ const knexQueryDone = Symbol('knexQueryDone');
+ const knexPerformingQuery = Symbol('knexPerformingQuery');
+
+ class ReadableStrapiQuery extends Readable {
+   /**
+    * @param {object} options
+    * @param {ReturnType<typeof import('../../query-builder')>} options.qb The strapi query builder instance
+    * @param {string} options.uid The model uid
+    * @param {import('../../../index').Database} options.db The Database instance
+    * @param {boolean} [options.mapResults] The maximum number of entities to fetch per query
+    * @param {number} [options.batchSize] The maximum number of entities to fetch per query
+    */
+   constructor({ qb, db, uid, mapResults = true, batchSize = 500 }) {
+     super({ objectMode: true, highWaterMark: batchSize });
+
+     // Extract offset & limit from the query-builder's state
+     const { offset, limit } = qb.state;
+
+     // Original offset value
+     this._offset = isFinite(offset) ? offset : 0;
+
+     // Max amount of entities to fetch, force null as undefined value
+     this._limit = isFinite(limit) ? limit : null;
+
+     // Total amount of entities fetched
+     this._fetched = 0;
+
+     /**
+      * Original query
+      * @type {import('knex').Knex}
+      */
+     this._query = qb.getKnexQuery();
+
+     // Query Builder instance
+     this._qb = qb;
+
+     // Database related properties
+     this._db = db;
+     this._uid = uid;
+     this._meta = db.metadata.get(uid);
+
+     // Stream params
+     this._batchSize = batchSize;
+     this._mapResults = mapResults;
+
+     // States
+     this[knexPerformingQuery] = false;
+   }
+
+   _destroy(err, cb) {
+     // If the stream is destroyed while a query is being made, then wait for a
+     // kQueryDone event to be emitted before actually destroying the stream
+     if (this[knexPerformingQuery]) {
+       this.once(knexQueryDone, (er) => cb(err || er));
+     } else {
+       cb(err);
+     }
+   }
+
+   /**
+    * Custom ._read() implementation
+    *
+    * NOTE: Here "size" means the number of entities to be read from the database.
+    * Not the actual byte size, as it would means that we need to return partial entities.
+    *
+    * @param {number} size
+    */
+   async _read(size) {
+     const query = this._query;
+
+     // Remove the original offset & limit properties from the query
+     // Theoretically, they would be replaced by calling them again, but this is just to be sure
+     query.clear('limit').clear('offset');
+
+     // Define the maximum read size based on the limit and the requested size
+     // NOTE: size is equal to _batchSize by default. Since we want to allow customizing it on
+     // the fly, we need to use its value instead of batchSize when computing the maxReadSize value
+     const maxReadSize =
+       // if no limit is defined in the query, use the given size,
+       // otherwise, use the smallest value between the two
+       this._limit === null ? size : Math.min(size, this._limit);
+
+     // Compute the limit for the next query
+     const limit =
+       // If a limit is defined
+       this._limit !== null &&
+       // And reading `maxReadSize` would fetch too many entities (> _limit)
+       this._fetched + maxReadSize > this._limit
+         ? // Then adjust the limit so that it only get the remaining entities
+           this._limit - this._fetched
+         : // Else, use the max read size
+           maxReadSize;
+
+     // If we don't have anything left to read (_limit === _fetched),
+     // don't bother making the query and end the stream by pushing null
+     if (limit <= 0) {
+       this.push(null);
+       return;
+     }
+
+     // Compute the offset (base offset + number of entities already fetched)
+     const offset = this._offset + this._fetched;
+
+     // Update the query with the new values (offset + limit)
+     query.offset(offset).limit(limit);
+
+     // Lock the ._destroy()
+     this[knexPerformingQuery] = true;
+
+     let results;
+     let count;
+     let err;
+
+     try {
+       // Execute the query and store the results & count
+       results = await query;
+
+       const { populate } = this._qb.state;
+
+       // Apply populate if needed
+       if (populate) {
+         await applyPopulate(results, populate, { qb: this._qb, uid: this._uid, db: this._db });
+       }
+
+       // Map results if asked to
+       if (this._mapResults) {
+         results = fromRow(this._meta, results);
+       }
+
+       count = results.length;
+     } catch (e) {
+       err = e;
+     }
+
+     // Unlock the ._destroy()
+     this[knexPerformingQuery] = false;
+
+     // Tell ._destroy() that it's now safe to close the db connection
+     if (this.destroyed) {
+       this.emit(knexQueryDone);
+       return;
+     }
+
+     // If there is an error, destroy with the given error
+     if (err) {
+       this.destroy(err);
+       return;
+     }
+
+     // Update the amount of fetched entities
+     this._fetched += count;
+
+     // While there is at least one value to unpack
+     for (const result of results) {
+       this.push(result);
+     }
+
+     // If the amount of fetched entities is smaller than the
+     // maximum read size, Then push null to close the stream
+     if (this._fetched === this._limit || count < this._batchSize) {
+       this.push(null);
+     }
+   }
+ }
+
+ module.exports = ReadableStrapiQuery;
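A usage sketch for the new streaming query (not part of the diff). The class above is exposed as helpers.ReadableQuery and surfaced by the stream() method added to the query builder further down in this diff. The content-type UID is made up, and db is assumed to be a connected Database instance whose queryBuilder(uid) returns the query builder (it is typed loosely as any in the index.d.ts change above):

    // Stream rows lazily in batches (500 per query by default) instead of loading them all.
    async function logTitles(db) {
      const stream = db
        .queryBuilder('api::article.article') // made-up uid
        .select(['id', 'title'])
        .stream({ mapResults: true });

      // ReadableStrapiQuery is an object-mode Readable, so async iteration works
      for await (const row of stream) {
        console.log(row.id, row.title);
      }
    }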
@@ -53,7 +53,7 @@ const castValue = (value, attribute) => {
    return value;
  }

- if (types.isScalar(attribute.type)) {
+ if (types.isScalar(attribute.type) && !isKnexQuery(value)) {
    const field = createField(attribute);

    return value === null ? null : field.toDB(value);
@@ -2,6 +2,7 @@

  const _ = require('lodash/fp');

+ const { DatabaseError } = require('../errors');
  const helpers = require('./helpers');

  const createQueryBuilder = (uid, db, initialState = {}) => {
@@ -488,6 +489,16 @@ const createQueryBuilder = (uid, db, initialState = {}) => {
        db.dialect.transformErrors(error);
      }
    },
+
+   stream({ mapResults = true } = {}) {
+     if (state.type === 'select') {
+       return new helpers.ReadableQuery({ qb: this, db, uid, mapResults });
+     }
+
+     throw new DatabaseError(
+       `query-builder.stream() has been called with an unsupported query type: "${state.type}"`
+     );
+   },
  };
};

@@ -46,4 +46,4 @@ export interface SchemaProvider {
    drop(): Promise<void>;
  }

- export default function(db: Database): SchemaProvider;
+ export default function (db: Database): SchemaProvider;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@strapi/database",
-   "version": "4.6.0-alpha.1",
+   "version": "4.6.0-beta.1",
    "description": "Strapi's database layer",
    "homepage": "https://strapi.io",
    "bugs": {
@@ -32,7 +32,7 @@
    },
    "dependencies": {
      "date-fns": "2.29.2",
-     "debug": "4.3.1",
+     "debug": "4.3.4",
      "fs-extra": "10.0.0",
      "knex": "1.0.7",
      "lodash": "4.17.21",
@@ -43,5 +43,5 @@
      "node": ">=14.19.1 <=18.x.x",
      "npm": ">=6.0.0"
    },
-   "gitHead": "9171c48104548f5f6da21abf2a8098009f1a40e9"
+   "gitHead": "2c0bcabdf0bf2a269fed50c6f23ba777845968a0"
  }