@strapi/database 4.5.2 → 4.6.0-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/index.d.ts
CHANGED
|
@@ -31,8 +31,7 @@ type AttributeOperators<T, K extends keyof T> = {
|
|
|
31
31
|
|
|
32
32
|
export type WhereParams<T> = {
|
|
33
33
|
[K in keyof T]?: T[K] | T[K][] | AttributeOperators<T, K>;
|
|
34
|
-
} &
|
|
35
|
-
LogicalOperators<T>;
|
|
34
|
+
} & LogicalOperators<T>;
|
|
36
35
|
|
|
37
36
|
type Sortables<T> = {
|
|
38
37
|
// check sortable
|
|
@@ -158,6 +157,7 @@ export interface Database {
|
|
|
158
157
|
lifecycles: LifecycleProvider;
|
|
159
158
|
migrations: MigrationProvider;
|
|
160
159
|
entityManager: EntityManager;
|
|
160
|
+
queryBuilder: any;
|
|
161
161
|
|
|
162
162
|
query<T extends keyof AllTypes>(uid: T): QueryFromContentType<T>;
|
|
163
163
|
}
|
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const { Readable } = require('stream');
|
|
4
|
+
const { isFinite } = require('lodash/fp');
|
|
5
|
+
|
|
6
|
+
const { applyPopulate } = require('../populate');
|
|
7
|
+
const { fromRow } = require('../transform');
|
|
8
|
+
|
|
9
|
+
// Private event/flag keys, kept as symbols so they cannot collide with
// userland stream events or properties.
const knexQueryDone = Symbol('knexQueryDone');
const knexPerformingQuery = Symbol('knexPerformingQuery');

/**
 * Object-mode readable stream that lazily pages through the results of a
 * Strapi query builder `select` query, fetching `batchSize` rows per round
 * trip and pushing one entity at a time.
 */
class ReadableStrapiQuery extends Readable {
  /**
   * @param {object} options
   * @param {ReturnType<typeof import('../../query-builder')>} options.qb The strapi query builder instance
   * @param {string} options.uid The model uid
   * @param {import('../../../index').Database} options.db The Database instance
   * @param {boolean} [options.mapResults] Whether raw rows should be mapped to entities (via `fromRow`) before being pushed
   * @param {number} [options.batchSize] The maximum number of entities to fetch per query
   */
  constructor({ qb, db, uid, mapResults = true, batchSize = 500 }) {
    // highWaterMark is in entities (objectMode), so each _read() asks for
    // up to `batchSize` rows by default.
    super({ objectMode: true, highWaterMark: batchSize });

    // Extract offset & limit from the query-builder's state
    const { offset, limit } = qb.state;

    // Original offset value
    this._offset = isFinite(offset) ? offset : 0;

    // Max amount of entities to fetch, force null as undefined value
    this._limit = isFinite(limit) ? limit : null;

    // Total amount of entities fetched
    this._fetched = 0;

    /**
     * Original query
     * @type {import('knex').Knex}
     */
    this._query = qb.getKnexQuery();

    // Query Builder instance
    this._qb = qb;

    // Database related properties
    this._db = db;
    this._uid = uid;
    this._meta = db.metadata.get(uid);

    // Stream params
    this._batchSize = batchSize;
    this._mapResults = mapResults;

    // States
    this[knexPerformingQuery] = false;
  }

  /**
   * Custom ._destroy() implementation.
   *
   * If the stream is destroyed while a query is being made, wait for the
   * knexQueryDone event emitted by ._read() before actually destroying the
   * stream, so the in-flight query is not torn down mid-execution.
   *
   * @param {Error | null} err
   * @param {(err?: Error | null) => void} cb
   */
  _destroy(err, cb) {
    if (this[knexPerformingQuery]) {
      // Propagate the destroy error first, or any error raised by the
      // pending query otherwise.
      this.once(knexQueryDone, (er) => cb(err || er));
    } else {
      cb(err);
    }
  }

  /**
   * Custom ._read() implementation
   *
   * NOTE: Here "size" means the number of entities to be read from the database.
   * Not the actual byte size, as that would mean we need to return partial entities.
   *
   * @param {number} size
   */
  async _read(size) {
    const query = this._query;

    // Remove the original offset & limit properties from the query.
    // Theoretically, they would be replaced by calling them again, but this is just to be sure
    query.clear('limit').clear('offset');

    // Define the maximum read size based on the limit and the requested size
    // NOTE: size is equal to _batchSize by default. Since we want to allow customizing it on
    // the fly, we need to use its value instead of batchSize when computing the maxReadSize value
    const maxReadSize =
      // if no limit is defined in the query, use the given size,
      // otherwise, use the smallest value between the two
      this._limit === null ? size : Math.min(size, this._limit);

    // Compute the limit for the next query
    const limit =
      // If a limit is defined
      this._limit !== null &&
      // And reading `maxReadSize` would fetch too many entities (> _limit)
      this._fetched + maxReadSize > this._limit
        ? // Then adjust the limit so that it only get the remaining entities
          this._limit - this._fetched
        : // Else, use the max read size
          maxReadSize;

    // If we don't have anything left to read (_limit === _fetched),
    // don't bother making the query and end the stream by pushing null
    if (limit <= 0) {
      this.push(null);
      return;
    }

    // Compute the offset (base offset + number of entities already fetched)
    const offset = this._offset + this._fetched;

    // Update the query with the new values (offset + limit)
    query.offset(offset).limit(limit);

    // Lock the ._destroy()
    this[knexPerformingQuery] = true;

    let results;
    let count;
    let err;

    try {
      // Execute the query and store the results & count
      results = await query;

      const { populate } = this._qb.state;

      // Apply populate if needed
      if (populate) {
        await applyPopulate(results, populate, { qb: this._qb, uid: this._uid, db: this._db });
      }

      // Map results if asked to
      if (this._mapResults) {
        results = fromRow(this._meta, results);
      }

      count = results.length;
    } catch (e) {
      err = e;
    }

    // Unlock the ._destroy()
    this[knexPerformingQuery] = false;

    // Tell ._destroy() that it's now safe to close the db connection
    if (this.destroyed) {
      this.emit(knexQueryDone);
      return;
    }

    // If there is an error, destroy with the given error
    if (err) {
      this.destroy(err);
      return;
    }

    // Update the amount of fetched entities
    this._fetched += count;

    // While there is at least one value to unpack
    for (const result of results) {
      this.push(result);
    }

    // If every entity allowed by the limit has been fetched, or the last
    // batch came back short, push null to close the stream.
    // NOTE(review): the short-batch check compares against _batchSize even
    // though the query was sized with `size` (which may differ when the
    // consumer customizes the read size) — confirm this is intentional.
    if (this._fetched === this._limit || count < this._batchSize) {
      this.push(null);
    }
  }
}

module.exports = ReadableStrapiQuery;
|
|
@@ -2,6 +2,7 @@
|
|
|
2
2
|
|
|
3
3
|
const _ = require('lodash/fp');
|
|
4
4
|
|
|
5
|
+
const { DatabaseError } = require('../errors');
|
|
5
6
|
const helpers = require('./helpers');
|
|
6
7
|
|
|
7
8
|
const createQueryBuilder = (uid, db, initialState = {}) => {
|
|
@@ -488,6 +489,16 @@ const createQueryBuilder = (uid, db, initialState = {}) => {
|
|
|
488
489
|
db.dialect.transformErrors(error);
|
|
489
490
|
}
|
|
490
491
|
},
|
|
492
|
+
|
|
493
|
+
stream({ mapResults = true } = {}) {
|
|
494
|
+
if (state.type === 'select') {
|
|
495
|
+
return new helpers.ReadableQuery({ qb: this, db, uid, mapResults });
|
|
496
|
+
}
|
|
497
|
+
|
|
498
|
+
throw new DatabaseError(
|
|
499
|
+
`query-builder.stream() has been called with an unsupported query type: "${state.type}"`
|
|
500
|
+
);
|
|
501
|
+
},
|
|
491
502
|
};
|
|
492
503
|
};
|
|
493
504
|
|
package/lib/schema/index.d.ts
CHANGED
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@strapi/database",
|
|
3
|
-
"version": "4.
|
|
3
|
+
"version": "4.6.0-alpha.0",
|
|
4
4
|
"description": "Strapi's database layer",
|
|
5
5
|
"homepage": "https://strapi.io",
|
|
6
6
|
"bugs": {
|
|
@@ -43,5 +43,5 @@
|
|
|
43
43
|
"node": ">=14.19.1 <=18.x.x",
|
|
44
44
|
"npm": ">=6.0.0"
|
|
45
45
|
},
|
|
46
|
-
"gitHead": "
|
|
46
|
+
"gitHead": "b7a87dcffc6f44e18eedef92e354096ffe32ce0c"
|
|
47
47
|
}
|