@webiny/api-headless-cms-ddb 0.0.0-mt-1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +25 -0
- package/definitions/entry.d.ts +8 -0
- package/definitions/entry.js +97 -0
- package/definitions/group.d.ts +8 -0
- package/definitions/group.js +74 -0
- package/definitions/model.d.ts +8 -0
- package/definitions/model.js +96 -0
- package/definitions/settings.d.ts +8 -0
- package/definitions/settings.js +62 -0
- package/definitions/system.d.ts +8 -0
- package/definitions/system.js +50 -0
- package/definitions/table.d.ts +8 -0
- package/definitions/table.js +30 -0
- package/dynamoDb/index.d.ts +2 -0
- package/dynamoDb/index.js +24 -0
- package/dynamoDb/path/plainObject.d.ts +3 -0
- package/dynamoDb/path/plainObject.js +33 -0
- package/dynamoDb/path/ref.d.ts +3 -0
- package/dynamoDb/path/ref.js +27 -0
- package/dynamoDb/storage/date.d.ts +3 -0
- package/dynamoDb/storage/date.js +65 -0
- package/dynamoDb/storage/longText.d.ts +7 -0
- package/dynamoDb/storage/longText.js +83 -0
- package/dynamoDb/storage/richText.d.ts +8 -0
- package/dynamoDb/storage/richText.js +110 -0
- package/dynamoDb/transformValue/datetime.d.ts +3 -0
- package/dynamoDb/transformValue/datetime.js +47 -0
- package/index.d.ts +2 -0
- package/index.js +125 -0
- package/operations/entry/dataLoaders.d.ts +38 -0
- package/operations/entry/dataLoaders.js +303 -0
- package/operations/entry/index.d.ts +8 -0
- package/operations/entry/index.js +823 -0
- package/operations/entry/keys.d.ts +25 -0
- package/operations/entry/keys.js +62 -0
- package/operations/entry/systemFields.d.ts +2 -0
- package/operations/entry/systemFields.js +50 -0
- package/operations/entry/utils.d.ts +31 -0
- package/operations/entry/utils.js +406 -0
- package/operations/group/index.d.ts +8 -0
- package/operations/group/index.js +198 -0
- package/operations/model/index.d.ts +6 -0
- package/operations/model/index.js +161 -0
- package/operations/settings/index.d.ts +6 -0
- package/operations/settings/index.js +141 -0
- package/operations/system/index.d.ts +6 -0
- package/operations/system/index.js +105 -0
- package/package.json +61 -0
- package/types.d.ts +84 -0
- package/types.js +16 -0
|
@@ -0,0 +1,823 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
|
|
3
|
+
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
|
|
4
|
+
|
|
5
|
+
Object.defineProperty(exports, "__esModule", {
|
|
6
|
+
value: true
|
|
7
|
+
});
|
|
8
|
+
exports.createEntriesStorageOperations = void 0;
|
|
9
|
+
|
|
10
|
+
var _defineProperty2 = _interopRequireDefault(require("@babel/runtime/helpers/defineProperty"));
|
|
11
|
+
|
|
12
|
+
var _error = _interopRequireDefault(require("@webiny/error"));
|
|
13
|
+
|
|
14
|
+
var _dataLoaders = require("./dataLoaders");
|
|
15
|
+
|
|
16
|
+
var _types = require("@webiny/api-headless-cms/types");
|
|
17
|
+
|
|
18
|
+
var _utils = require("./utils");
|
|
19
|
+
|
|
20
|
+
var _keys = require("./keys");
|
|
21
|
+
|
|
22
|
+
var _batchWrite = require("@webiny/db-dynamodb/utils/batchWrite");
|
|
23
|
+
|
|
24
|
+
var _query = require("@webiny/db-dynamodb/utils/query");
|
|
25
|
+
|
|
26
|
+
var _cleanup = require("@webiny/db-dynamodb/utils/cleanup");
|
|
27
|
+
|
|
28
|
+
var _cursor = require("@webiny/utils/cursor");
|
|
29
|
+
|
|
30
|
+
var _zeroPad = require("@webiny/utils/zeroPad");
|
|
31
|
+
|
|
32
|
+
var _apiHeadlessCms = require("@webiny/api-headless-cms");
|
|
33
|
+
|
|
34
|
+
/**
 * Babel runtime helper: collects the own string keys of `object`, plus its own
 * symbol keys. When `enumerableOnly` is truthy, only enumerable symbol keys are
 * included (string keys from Object.keys are always enumerable already).
 */
function ownKeys(object, enumerableOnly) {
    const keys = Object.keys(object);
    if (Object.getOwnPropertySymbols) {
        let symbols = Object.getOwnPropertySymbols(object);
        if (enumerableOnly) {
            symbols = symbols.filter(function (sym) {
                return Object.getOwnPropertyDescriptor(object, sym).enumerable;
            });
        }
        keys.push.apply(keys, symbols);
    }
    return keys;
}
|
|
35
|
+
|
|
36
|
+
// Babel runtime helper: the compiled form of object-spread (`{ ...a, ...b }`).
// Odd-numbered arguments copy enumerable own keys (string and symbol) onto
// `target` by assignment via the _defineProperty helper; even-numbered
// arguments copy full property descriptors. Returns the mutated `target`.
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { (0, _defineProperty2.default)(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; }
|
|
37
|
+
|
|
38
|
+
// Base DynamoDB record TYPE for a CMS entry revision record.
const createType = () => "cms.entry";
|
|
41
|
+
|
|
42
|
+
// TYPE for the "latest revision" pointer record (base type + ".l" suffix).
const createLatestType = () => `${createType()}.l`;
|
|
45
|
+
|
|
46
|
+
// TYPE for the "published revision" pointer record (base type + ".p" suffix).
const createPublishedType = () => `${createType()}.p`;
|
|
49
|
+
|
|
50
|
+
/**
 * Factory for the DynamoDB-backed CMS entry storage operations.
 *
 * `params` supplies:
 * - `entity`  — the DynamoDB Toolbox entity used for every read and write below;
 * - `plugins` — plugin container used to resolve storage-transform plugins.
 *
 * Returns an object of CRUD/publishing operations (see the `return` at the
 * bottom of this factory).
 */
const createEntriesStorageOperations = params => {
  const {
    entity,
    plugins
  } = params;
  // Dataloaders batch and cache entry reads; every write operation below calls
  // `dataLoaders.clearAll({ model })` so stale cached reads are never served.
  const dataLoaders = new _dataLoaders.DataLoadersHandler({
    entity
  });
  // Index storage-transform plugins by the field type each one handles, so
  // lookups during filtering are O(1).
  const storageTransformPlugins = plugins.byType(_apiHeadlessCms.StorageTransformPlugin.type).reduce((collection, plugin) => {
    collection[plugin.fieldType] = plugin;
    return collection;
  }, {});
|
62
|
+
|
|
63
|
+
/**
 * Builds a (field, value) => value callable that converts a stored value back
 * into its runtime form via the matching storage-transform plugin. Values whose
 * field type has no registered plugin pass through unchanged.
 */
const createStorageTransformCallable = model => (field, value) => {
    const transformPlugin = storageTransformPlugins[field.type];
    if (!transformPlugin) {
        // No transform registered for this field type — value is used as-is.
        return value;
    }
    return transformPlugin.fromStorage({
        model,
        field,
        value,
        // Fall back to the catch-all ("*") plugin when a specific one is missing.
        getStoragePlugin: fieldType => storageTransformPlugins[fieldType] || storageTransformPlugins["*"],
        plugins
    });
};
|
|
84
|
+
|
|
85
|
+
/**
 * Creates a brand new entry.
 * Writes two records in one batch:
 * - the revision record itself (TYPE "cms.entry", GSI partition "A");
 * - the "latest" pointer record (TYPE "cms.entry.l", GSI partition "L").
 * Clears the dataloader caches afterwards and returns the stored entry.
 */
const create = async (model, args) => {
    const { entry, storageEntry } = args;
    const partitionKey = (0, _keys.createPartitionKey)(entry);
    const revisionRecord = _objectSpread(_objectSpread({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(entry),
        TYPE: createType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
        GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
    });
    const latestRecord = _objectSpread(_objectSpread({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createLatestSortKey)(),
        TYPE: createLatestType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
        GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
    });
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items: [entity.putBatch(revisionRecord), entity.putBatch(latestRecord)]
        });
        dataLoaders.clearAll({
            model
        });
    } catch (ex) {
        throw new _error.default(ex.message || "Could not insert data into the DynamoDB.", ex.code || "CREATE_ENTRY_ERROR", {
            error: ex,
            entry
        });
    }
    return storageEntry;
};
|
|
128
|
+
|
|
129
|
+
/**
 * Creates a new revision of an existing entry.
 * Writes, in one batch:
 * - the new revision record (GSI partition "A");
 * - the "latest" pointer record, repointed at this new revision ("L").
 * Returns the stored revision unmodified.
 */
const createRevisionFrom = async (model, params) => {
    const { originalEntry, entry, storageEntry, latestEntry } = params;
    const partitionKey = (0, _keys.createPartitionKey)(storageEntry);
    const gsiSortKey = (0, _keys.createGSISortKey)(storageEntry);
    const revisionRecord = _objectSpread(_objectSpread({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(storageEntry),
        TYPE: createType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
        GSI1_SK: gsiSortKey
    });
    const latestRecord = _objectSpread(_objectSpread({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createLatestSortKey)(),
        TYPE: createLatestType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
        GSI1_SK: gsiSortKey
    });
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items: [entity.putBatch(revisionRecord), entity.putBatch(latestRecord)]
        });
        dataLoaders.clearAll({
            model
        });
    } catch (ex) {
        throw new _error.default(ex.message || "Could not create revision from given entry.", ex.code || "CREATE_REVISION_ERROR", {
            error: ex,
            originalEntry,
            latestEntry,
            entry,
            storageEntry
        });
    }
    // No modifications were made to the entry created, so just return the data.
    return storageEntry;
};
|
|
181
|
+
|
|
182
|
+
/**
 * Overwrites an existing revision record and, when that revision is also the
 * latest one, rewrites the "latest" pointer record as well.
 */
const update = async (model, params) => {
    const { originalEntry, entry, storageEntry } = params;
    const partitionKey = (0, _keys.createPartitionKey)(originalEntry);
    const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(storageEntry),
        TYPE: createType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
        GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
    }))];
    // Fetch the latest record so we know whether the pointer must be refreshed too.
    const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
    const updatingLatest = latestStorageEntry && latestStorageEntry.id === entry.id;
    if (updatingLatest) {
        items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        })));
    }
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items
        });
        dataLoaders.clearAll({
            model
        });
        return storageEntry;
    } catch (ex) {
        throw new _error.default(ex.message || "Could not update entry.", ex.code || "UPDATE_ERROR", {
            error: ex,
            originalEntry,
            entry,
            latestStorageEntry
        });
    }
};
|
|
237
|
+
|
|
238
|
+
/**
 * Deletes an entry completely: queries every record under the entry's
 * partition key (all revisions plus latest/published pointers) and removes
 * them in one batch.
 */
const deleteEntry = async (model, params) => {
    const { entry } = params;
    const queryAllParams = {
        entity,
        partitionKey: (0, _keys.createPartitionKey)(entry),
        options: {
            // " " is the lowest printable sort-key value — matches every record.
            gte: " "
        }
    };
    let records = [];
    try {
        records = await (0, _query.queryAll)(queryAllParams);
    } catch (ex) {
        throw new _error.default(ex.message || "Could not load all records.", ex.code || "LOAD_ALL_RECORDS_ERROR", {
            error: ex,
            entry
        });
    }
    const items = records.map(({ PK, SK }) => entity.deleteBatch({ PK, SK }));
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items
        });
        dataLoaders.clearAll({
            model
        });
    } catch (ex) {
        throw new _error.default(ex.message || "Could not delete the entry.", ex.code || "DELETE_ENTRY_ERROR", {
            error: ex,
            partitionKey: queryAllParams.partitionKey,
            entry
        });
    }
};
|
|
283
|
+
|
|
284
|
+
/**
 * Deletes a single revision record.
 * Also removes the "published" pointer when the deleted revision is the
 * published one, and repoints the "latest" record at
 * `storageEntryToSetAsLatest` when a replacement latest revision is supplied.
 */
const deleteRevision = async (model, params) => {
    const {
        entryToDelete,
        entryToSetAsLatest,
        storageEntryToSetAsLatest
    } = params;
    const partitionKey = (0, _keys.createPartitionKey)(entryToDelete);
    const items = [entity.deleteBatch({
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(entryToDelete)
    })];
    const publishedStorageEntry = await getPublishedRevisionByEntryId(model, entryToDelete);
    /**
     * If revision we are deleting is the published one as well, we need to delete those records as well.
     */
    if (publishedStorageEntry && entryToDelete.id === publishedStorageEntry.id) {
        items.push(entity.deleteBatch({
            PK: partitionKey,
            SK: (0, _keys.createPublishedSortKey)()
        }));
    }
    if (storageEntryToSetAsLatest) {
        items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntryToSetAsLatest), {}, {
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(storageEntryToSetAsLatest)
        })));
    }
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items
        });
        dataLoaders.clearAll({
            model
        });
    } catch (ex) {
        // FIX: previously rethrown with `ex.message`/`ex.code` and no fallbacks,
        // unlike every sibling operation — an error without message/code produced
        // an empty, untraceable error. Provide defaults consistent with the rest.
        throw new _error.default(ex.message || "Could not execute the delete revision batch.", ex.code || "DELETE_REVISION_ERROR", {
            error: ex,
            entryToDelete,
            entryToSetAsLatest
        });
    }
};
|
|
333
|
+
|
|
334
|
+
// Loads every revision for each of the given entry ids via the dataloader.
const getAllRevisionsByIds = async (model, params) => {
    const { ids } = params;
    return dataLoaders.getAllEntryRevisions({ model, ids });
};
|
|
340
|
+
|
|
341
|
+
// Latest revision for a single entry id; resolves to null when none exists.
const getLatestRevisionByEntryId = async (model, params) => {
    const [latest = null] = await dataLoaders.getLatestRevisionByEntryId({
        model,
        ids: [params.id]
    });
    return latest;
};
|
|
353
|
+
|
|
354
|
+
// Published revision for a single entry id; resolves to null when none exists.
const getPublishedRevisionByEntryId = async (model, params) => {
    const [published = null] = await dataLoaders.getPublishedRevisionByEntryId({
        model,
        ids: [params.id]
    });
    return published;
};
|
|
366
|
+
|
|
367
|
+
// Single revision looked up by its exact revision id; null when not found.
const getRevisionById = async (model, params) => {
    const [revision = null] = await dataLoaders.getRevisionById({
        model,
        ids: [params.id]
    });
    return revision;
};
|
|
379
|
+
|
|
380
|
+
// All revisions of one entry (same loader as getAllRevisionsByIds, single id).
const getRevisions = async (model, params) => {
    const { id } = params;
    return dataLoaders.getAllEntryRevisions({ model, ids: [id] });
};
|
|
386
|
+
|
|
387
|
+
// Batched revision lookup — delegates straight to the revision dataloader.
const getByIds = async (model, params) => {
    const { ids } = params;
    return dataLoaders.getRevisionById({ model, ids });
};
|
|
393
|
+
|
|
394
|
+
// Batched "latest revision" lookup for multiple entry ids.
const getLatestByIds = async (model, params) => {
    const { ids } = params;
    return dataLoaders.getLatestRevisionByEntryId({ model, ids });
};
|
|
400
|
+
|
|
401
|
+
// Batched "published revision" lookup for multiple entry ids.
const getPublishedByIds = async (model, params) => {
    const { ids } = params;
    return dataLoaders.getPublishedRevisionByEntryId({ model, ids });
};
|
|
407
|
+
|
|
408
|
+
/**
 * Finds the revision immediately preceding `params.version` for the given
 * entry, or null when there is none.
 */
const getPreviousRevision = async (model, params) => {
    const { tenant, locale } = model;
    const { entryId, version } = params;
    const query = {
        entity,
        partitionKey: (0, _keys.createPartitionKey)({
            tenant,
            locale,
            id: entryId
        }),
        options: {
            lt: `REV#${(0, _zeroPad.zeroPad)(version)}`,
            /**
             * We need to have extra checks because DynamoDB will return published or latest record if there is no REV# record.
             */
            filters: [
                { attr: "TYPE", eq: createType() },
                { attr: "version", lt: version }
            ],
            // Highest version below the requested one comes first.
            reverse: true
        }
    };
    try {
        const previous = await (0, _query.queryOne)(query);
        return (0, _cleanup.cleanupItem)(entity, previous);
    } catch (ex) {
        throw new _error.default(ex.message || "Could not get previous version of given entry.", ex.code || "GET_PREVIOUS_VERSION_ERROR", _objectSpread(_objectSpread({}, params), {}, {
            error: ex,
            partitionKey: query.partitionKey,
            options: query.options,
            model
        }));
    }
};
|
|
453
|
+
|
|
454
|
+
/**
 * Lists entries of a model.
 * Reads all latest ("L") or published ("P") records for the model from the
 * GSI1 index, then filters, sorts, and paginates them in application code.
 * The cursor is simply an encoded start offset.
 */
const list = async (model, params) => {
    const {
        limit: initialLimit = 10,
        where: originalWhere,
        after,
        sort
    } = params;
    // Clamp the page size to (0, 100]; anything out of range becomes 100.
    const limit = initialLimit <= 0 || initialLimit >= 100 ? 100 : initialLimit;
    const type = originalWhere.published ? "P" : "L";
    const queryAllParams = {
        entity,
        partitionKey: (0, _keys.createGSIPartitionKey)(model, type),
        options: {
            index: "GSI1",
            gte: " "
        }
    };
    let records = [];
    try {
        records = await (0, _query.queryAll)(queryAllParams);
    } catch (ex) {
        throw new _error.default(ex.message, "QUERY_ENTRIES_ERROR", {
            error: ex,
            partitionKey: queryAllParams.partitionKey,
            options: queryAllParams.options
        });
    }
    if (records.length === 0) {
        return {
            hasMoreItems: false,
            totalCount: 0,
            cursor: null,
            items: []
        };
    }
    // These keys are consumed by the query itself, not by the in-code filter.
    const where = _objectSpread({}, originalWhere);
    delete where["published"];
    delete where["latest"];
    delete where["locale"];
    delete where["tenant"];
    /**
     * We need a object containing field, transformers and paths.
     * Just build it here and pass on into other methods that require it to avoid mapping multiple times.
     */
    const modelFields = (0, _utils.buildModelFields)({
        plugins,
        model
    });
    /**
     * Filter the read items via the code.
     * It will build the filters out of the where input and transform the values it is using.
     */
    const filteredItems = await (0, _utils.filterItems)({
        items: records,
        where,
        plugins,
        fields: modelFields,
        fromStorage: createStorageTransformCallable(model)
    });
    const totalCount = filteredItems.length;
    /**
     * Sorting is also done via the code.
     * It takes the sort input and sorts by it via the lodash sortBy method.
     */
    const sortedItems = (0, _utils.sortEntryItems)({
        items: filteredItems,
        sort,
        fields: modelFields
    });
    const start = (0, _cursor.decodeCursor)(after) || 0;
    const hasMoreItems = totalCount > start + limit;
    // FIX: the original condition (`limit > totalCount + start + limit`) could
    // never be true, so `end` was always `start + limit`. Use the evidently
    // intended bounds check; slice output is identical either way since
    // Array.prototype.slice clamps `end` to the array length.
    const end = start + limit > totalCount ? undefined : start + limit;
    const slicedItems = sortedItems.slice(start, end);
    /**
     * Although we do not need a cursor here, we will use it as such to keep it standardized.
     * Number is simply encoded.
     */
    const cursor = hasMoreItems ? (0, _cursor.encodeCursor)(start + limit) : null;
    return {
        hasMoreItems,
        totalCount,
        cursor,
        items: (0, _cleanup.cleanupItems)(entity, slicedItems)
    };
};
|
|
547
|
+
|
|
548
|
+
// Fetches a single entry by running `list` with limit 1; null when no match.
const get = async (model, params) => {
    const listParams = _objectSpread(_objectSpread({}, params), {}, {
        limit: 1
    });
    const { items } = await list(model, listParams);
    return items.length > 0 ? items[0] : null;
};
|
|
561
|
+
|
|
562
|
+
/**
 * Persists a "changes requested" state on a revision.
 * We need to:
 * - update the existing entry;
 * - update the latest pointer record — if the existing entry is the latest one.
 */
const requestChanges = async (model, params) => {
    const {
        entry,
        storageEntry,
        originalEntry
    } = params;
    const partitionKey = (0, _keys.createPartitionKey)(entry);
    const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
        TYPE: createType(),
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(entry),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
        GSI1_SK: (0, _keys.createGSISortKey)(entry)
    }))];
    /**
     * We need the latest entry to see if something needs to be updated along side the request changes one.
     */
    const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
    // FIX: getLatestRevisionByEntryId() resolves to null when no latest record
    // exists; reading `.id` unguarded threw a TypeError. The sibling `update`
    // operation already guards the same way.
    if (latestStorageEntry && latestStorageEntry.id === entry.id) {
        items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        })));
    }
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items
        });
        dataLoaders.clearAll({
            model
        });
    } catch (ex) {
        // FIX: include the underlying error (`error: ex`) like every other
        // operation in this file does.
        throw new _error.default(ex.message || "Could not execute the request changes batch.", ex.code || "REQUEST_CHANGES_ERROR", {
            error: ex,
            entry,
            originalEntry
        });
    }
    return entry;
};
|
|
615
|
+
|
|
616
|
+
/**
 * Persists a "review requested" state on a revision.
 * We need to:
 * - update the existing entry;
 * - update the latest pointer record — if the existing entry is the latest one.
 */
const requestReview = async (model, params) => {
    const {
        entry,
        storageEntry,
        originalEntry
    } = params;
    const partitionKey = (0, _keys.createPartitionKey)(entry);
    const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
        TYPE: createType(),
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(entry),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
        GSI1_SK: (0, _keys.createGSISortKey)(entry)
    }))];
    /**
     * We need the latest entry to see if something needs to be updated along side the request review one.
     */
    const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
    // FIX: guard against a null latest record before dereferencing `.id`
    // (getLatestRevisionByEntryId can resolve to null).
    if (latestStorageEntry && latestStorageEntry.id === entry.id) {
        items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        })));
    }
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items
        });
        dataLoaders.clearAll({
            model
        });
        return entry;
    } catch (ex) {
        // FIX: include the underlying error (`error: ex`) for parity with the
        // other operations in this file.
        throw new _error.default(ex.message || "Could not execute request review batch.", ex.code || "REQUEST_REVIEW_ERROR", {
            error: ex,
            entry,
            storageEntry,
            originalEntry
        });
    }
};
|
|
669
|
+
|
|
670
|
+
/**
 * Publishes a revision.
 * We need to update:
 * - the current entry revision record;
 * - the published pointer record;
 * - the latest pointer record — if the entry being published is the latest one;
 * - the previously published revision's status to "unpublished" — if any.
 */
const publish = async (model, params) => {
    const {
        entry,
        storageEntry
    } = params;
    const partitionKey = (0, _keys.createPartitionKey)(entry);
    /**
     * We need the latest and published entries to see if something needs to be
     * updated along side the publishing one.
     * FIX: the two dataloader reads are independent, so run them in parallel
     * instead of serially.
     */
    const [latestStorageEntry, publishedStorageEntry] = await Promise.all([
        getLatestRevisionByEntryId(model, entry),
        getPublishedRevisionByEntryId(model, entry)
    ]);
    const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(entry),
        TYPE: createType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
        GSI1_SK: (0, _keys.createGSISortKey)(entry)
    })), entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createPublishedSortKey)(),
        TYPE: createPublishedType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "P"),
        GSI1_SK: (0, _keys.createGSISortKey)(entry)
    }))];
    // FIX: getLatestRevisionByEntryId() can resolve to null; guard before
    // dereferencing `.id` (previously an unguarded TypeError).
    if (latestStorageEntry && entry.id === latestStorageEntry.id) {
        items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        })));
    }
    if (publishedStorageEntry) {
        // Demote the previously published revision to "unpublished".
        items.push(entity.putBatch(_objectSpread(_objectSpread({}, publishedStorageEntry), {}, {
            PK: partitionKey,
            SK: (0, _keys.createRevisionSortKey)(publishedStorageEntry),
            TYPE: createType(),
            status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED,
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
            GSI1_SK: (0, _keys.createGSISortKey)(publishedStorageEntry)
        })));
    }
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items
        });
        dataLoaders.clearAll({
            model
        });
        return entry;
    } catch (ex) {
        // FIX: include the underlying error (`error: ex`) like the sibling
        // operations do.
        throw new _error.default(ex.message || "Could not execute the publishing batch.", ex.code || "PUBLISH_ERROR", {
            error: ex,
            entry,
            latestStorageEntry,
            publishedStorageEntry
        });
    }
};
|
|
742
|
+
|
|
743
|
+
/**
 * Unpublishes a revision.
 * We need to:
 * - delete the currently published pointer record;
 * - update the current entry revision with the new data;
 * - update the latest pointer record — if the entry being unpublished is the
 *   latest one.
 */
const unpublish = async (model, params) => {
    const {
        entry,
        storageEntry
    } = params;
    const partitionKey = (0, _keys.createPartitionKey)(entry);
    const items = [entity.deleteBatch({
        PK: partitionKey,
        SK: (0, _keys.createPublishedSortKey)()
    }), entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(entry),
        TYPE: createType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
        GSI1_SK: (0, _keys.createGSISortKey)(entry)
    }))];
    /**
     * We need the latest entry to see if something needs to be updated along side the unpublishing one.
     */
    const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
    // FIX: guard against a null latest record before reading `.id`
    // (getLatestRevisionByEntryId can resolve to null).
    if (latestStorageEntry && entry.id === latestStorageEntry.id) {
        items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        })));
    }
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items
        });
        dataLoaders.clearAll({
            model
        });
        return storageEntry;
    } catch (ex) {
        // FIX: include the underlying error (`error: ex`) like the sibling
        // operations do.
        throw new _error.default(ex.message || "Could not execute unpublish batch.", ex.code || "UNPUBLISH_ERROR", {
            error: ex,
            entry,
            storageEntry
        });
    }
};
|
|
798
|
+
|
|
799
|
+
  /**
   * Public storage-operations interface consumed by the headless CMS core.
   * `delete` maps to `deleteEntry` (the bare identifier `delete` is a reserved
   * word, but it is valid as an object key).
   */
  return {
    create,
    createRevisionFrom,
    update,
    delete: deleteEntry,
    deleteRevision,
    getPreviousRevision,
    getPublishedByIds,
    getLatestByIds,
    getByIds,
    getRevisionById,
    getPublishedRevisionByEntryId,
    getAllRevisionsByIds,
    getLatestRevisionByEntryId,
    get,
    getRevisions,
    requestChanges,
    requestReview,
    publish,
    list,
    unpublish
  };
};

exports.createEntriesStorageOperations = createEntriesStorageOperations;
|