@webiny/api-headless-cms-ddb 5.17.4 → 5.18.0-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91) hide show
  1. package/definitions/entry.d.ts +6 -4
  2. package/definitions/entry.js +19 -8
  3. package/definitions/group.d.ts +8 -0
  4. package/definitions/group.js +74 -0
  5. package/definitions/model.d.ts +8 -0
  6. package/definitions/model.js +96 -0
  7. package/definitions/settings.d.ts +8 -0
  8. package/definitions/settings.js +62 -0
  9. package/definitions/system.d.ts +8 -0
  10. package/definitions/system.js +50 -0
  11. package/definitions/table.d.ts +6 -12
  12. package/definitions/table.js +15 -15
  13. package/dynamoDb/index.d.ts +1 -1
  14. package/dynamoDb/index.js +4 -3
  15. package/dynamoDb/path/plainObject.js +1 -2
  16. package/dynamoDb/path/ref.js +1 -2
  17. package/dynamoDb/storage/date.d.ts +2 -2
  18. package/dynamoDb/storage/date.js +17 -21
  19. package/dynamoDb/storage/longText.d.ts +7 -0
  20. package/dynamoDb/storage/longText.js +83 -0
  21. package/dynamoDb/storage/richText.d.ts +2 -2
  22. package/dynamoDb/storage/richText.js +69 -67
  23. package/dynamoDb/transformValue/datetime.d.ts +1 -1
  24. package/dynamoDb/transformValue/datetime.js +1 -2
  25. package/index.d.ts +2 -6
  26. package/index.js +108 -13
  27. package/operations/entry/dataLoaders.d.ts +34 -19
  28. package/operations/entry/dataLoaders.js +158 -138
  29. package/operations/entry/index.d.ts +8 -4
  30. package/operations/entry/index.js +812 -16
  31. package/operations/entry/keys.d.ts +25 -0
  32. package/operations/entry/keys.js +62 -0
  33. package/operations/entry/systemFields.d.ts +2 -2
  34. package/operations/entry/systemFields.js +1 -2
  35. package/operations/entry/utils.d.ts +13 -9
  36. package/operations/entry/utils.js +62 -18
  37. package/operations/group/index.d.ts +8 -0
  38. package/operations/group/index.js +198 -0
  39. package/operations/model/index.d.ts +6 -3
  40. package/operations/model/index.js +153 -18
  41. package/operations/settings/index.d.ts +6 -3
  42. package/operations/settings/index.js +132 -16
  43. package/operations/system/index.d.ts +6 -3
  44. package/operations/system/index.js +94 -14
  45. package/package.json +11 -11
  46. package/types.d.ts +38 -8
  47. package/types.js +11 -1
  48. package/configurations.d.ts +0 -18
  49. package/configurations.js +0 -24
  50. package/configurations.js.map +0 -1
  51. package/definitions/entry.js.map +0 -1
  52. package/definitions/table.js.map +0 -1
  53. package/dynamoDb/index.js.map +0 -1
  54. package/dynamoDb/path/plainObject.js.map +0 -1
  55. package/dynamoDb/path/ref.js.map +0 -1
  56. package/dynamoDb/storage/date.js.map +0 -1
  57. package/dynamoDb/storage/richText.js.map +0 -1
  58. package/dynamoDb/transformValue/datetime.js.map +0 -1
  59. package/index.js.map +0 -1
  60. package/operations/entry/CmsContentEntryDynamo.d.ts +0 -86
  61. package/operations/entry/CmsContentEntryDynamo.js +0 -972
  62. package/operations/entry/CmsContentEntryDynamo.js.map +0 -1
  63. package/operations/entry/dataLoaders.js.map +0 -1
  64. package/operations/entry/index.js.map +0 -1
  65. package/operations/entry/systemFields.js.map +0 -1
  66. package/operations/entry/utils.js.map +0 -1
  67. package/operations/helpers.d.ts +0 -5
  68. package/operations/helpers.js +0 -96
  69. package/operations/helpers.js.map +0 -1
  70. package/operations/model/CmsContentModelDynamo.d.ts +0 -18
  71. package/operations/model/CmsContentModelDynamo.js +0 -234
  72. package/operations/model/CmsContentModelDynamo.js.map +0 -1
  73. package/operations/model/index.js.map +0 -1
  74. package/operations/modelGroup/CmsContentModelGroupDynamo.d.ts +0 -42
  75. package/operations/modelGroup/CmsContentModelGroupDynamo.js +0 -230
  76. package/operations/modelGroup/CmsContentModelGroupDynamo.js.map +0 -1
  77. package/operations/modelGroup/index.d.ts +0 -3
  78. package/operations/modelGroup/index.js +0 -26
  79. package/operations/modelGroup/index.js.map +0 -1
  80. package/operations/settings/CmsSettingsDynamo.d.ts +0 -16
  81. package/operations/settings/CmsSettingsDynamo.js +0 -145
  82. package/operations/settings/CmsSettingsDynamo.js.map +0 -1
  83. package/operations/settings/index.js.map +0 -1
  84. package/operations/system/CmsSystemDynamo.d.ts +0 -16
  85. package/operations/system/CmsSystemDynamo.js +0 -126
  86. package/operations/system/CmsSystemDynamo.js.map +0 -1
  87. package/operations/system/index.js.map +0 -1
  88. package/types.js.map +0 -1
  89. package/utils.d.ts +0 -5
  90. package/utils.js +0 -62
  91. package/utils.js.map +0 -1
@@ -1,25 +1,821 @@
1
1
  "use strict";
2
2
 
3
+ var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
4
+
3
5
  Object.defineProperty(exports, "__esModule", {
4
6
  value: true
5
7
  });
6
- exports.default = void 0;
8
+ exports.createEntriesStorageOperations = void 0;
9
+
10
+ var _defineProperty2 = _interopRequireDefault(require("@babel/runtime/helpers/defineProperty"));
11
+
12
+ var _error = _interopRequireDefault(require("@webiny/error"));
13
+
14
+ var _dataLoaders = require("./dataLoaders");
15
+
16
+ var _types = require("@webiny/api-headless-cms/types");
17
+
18
+ var _utils = require("./utils");
19
+
20
+ var _keys = require("./keys");
21
+
22
+ var _batchWrite = require("@webiny/db-dynamodb/utils/batchWrite");
23
+
24
+ var _query = require("@webiny/db-dynamodb/utils/query");
25
+
26
+ var _cleanup = require("@webiny/db-dynamodb/utils/cleanup");
27
+
28
+ var _cursor = require("@webiny/utils/cursor");
29
+
30
+ var _zeroPad = require("@webiny/utils/zeroPad");
31
+
32
+ var _apiHeadlessCms = require("@webiny/api-headless-cms");
33
+
34
+ function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) { symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); } keys.push.apply(keys, symbols); } return keys; }
35
+
36
+ function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { (0, _defineProperty2.default)(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; }
37
+
38
+ const createType = () => {
39
+ return "cms.entry";
40
+ };
41
+
42
+ const createLatestType = () => {
43
+ return `${createType()}.l`;
44
+ };
45
+
46
+ const createPublishedType = () => {
47
+ return `${createType()}.p`;
48
+ };
49
+
50
+ const createEntriesStorageOperations = params => {
51
+ const {
52
+ entity,
53
+ plugins
54
+ } = params;
55
+ const dataLoaders = new _dataLoaders.DataLoadersHandler({
56
+ entity
57
+ });
58
+ const storageTransformPlugins = plugins.byType(_apiHeadlessCms.StorageTransformPlugin.type).reduce((collection, plugin) => {
59
+ collection[plugin.fieldType] = plugin;
60
+ return collection;
61
+ }, {});
62
+
63
+ const createStorageTransformCallable = model => {
64
+ return (field, value) => {
65
+ const plugin = storageTransformPlugins[field.type];
66
+
67
+ if (!plugin) {
68
+ return value;
69
+ }
70
+
71
+ return plugin.fromStorage({
72
+ model,
73
+ field,
74
+ value,
75
+
76
+ getStoragePlugin(fieldType) {
77
+ return storageTransformPlugins[fieldType] || storageTransformPlugins["*"];
78
+ },
79
+
80
+ plugins
81
+ });
82
+ };
83
+ };
84
+
85
+ const create = async (model, args) => {
86
+ const {
87
+ entry,
88
+ storageEntry
89
+ } = args;
90
+ const partitionKey = (0, _keys.createPartitionKey)(entry);
91
+ /**
92
+ * We need to:
93
+ * - create new main entry item
94
+ * - create new or update latest entry item
95
+ */
96
+
97
+ const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
98
+ PK: partitionKey,
99
+ SK: (0, _keys.createRevisionSortKey)(entry),
100
+ TYPE: createType(),
101
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
102
+ GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
103
+ })), entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
104
+ PK: partitionKey,
105
+ SK: (0, _keys.createLatestSortKey)(),
106
+ TYPE: createLatestType(),
107
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
108
+ GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
109
+ }))];
110
+
111
+ try {
112
+ await (0, _batchWrite.batchWriteAll)({
113
+ table: entity.table,
114
+ items
115
+ });
116
+ dataLoaders.clearAll({
117
+ model
118
+ });
119
+ } catch (ex) {
120
+ throw new _error.default(ex.message || "Could not insert data into the DynamoDB.", ex.code || "CREATE_ENTRY_ERROR", {
121
+ error: ex,
122
+ entry
123
+ });
124
+ }
7
125
 
8
- var _CmsContentEntryDynamo = require("./CmsContentEntryDynamo");
126
+ return storageEntry;
127
+ };
9
128
 
10
- const contentEntryStorageOperationsProvider = configuration => ({
11
- type: "cms-content-entry-storage-operations-provider",
12
- name: "cms-content-entry-storage-operations-ddb-crud",
13
- provide: async ({
14
- context
15
- }) => {
16
- return new _CmsContentEntryDynamo.CmsContentEntryDynamo({
17
- context,
18
- configuration
129
  /**
   * Creates a new revision from an existing entry: writes the new revision
   * record and overwrites the "latest" record to point at it, in one batch.
   * Returns the given storageEntry unchanged.
   *
   * @throws WebinyError (CREATE_REVISION_ERROR) when the batch write fails.
   */
  const createRevisionFrom = async (model, params) => {
    const {
      originalEntry,
      entry,
      storageEntry,
      latestEntry
    } = params;
    const partitionKey = (0, _keys.createPartitionKey)(storageEntry);
    /**
     * We need to:
     * - create the main entry item
     * - update the last entry item to a current one
     */

    const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
      PK: partitionKey,
      SK: (0, _keys.createRevisionSortKey)(storageEntry),
      TYPE: createType(),
      GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
      GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
    })), entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
      PK: partitionKey,
      SK: (0, _keys.createLatestSortKey)(),
      TYPE: createLatestType(),
      GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
      GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
    }))];

    try {
      await (0, _batchWrite.batchWriteAll)({
        table: entity.table,
        items
      });
      dataLoaders.clearAll({
        model
      });
    } catch (ex) {
      throw new _error.default(ex.message || "Could not create revision from given entry.", ex.code || "CREATE_REVISION_ERROR", {
        error: ex,
        originalEntry,
        latestEntry,
        entry,
        storageEntry
      });
    }
    /**
     * There are no modifications on the entry created so just return the data.
     */


    return storageEntry;
  };
181
+
182
  /**
   * Updates an existing revision record; when the updated revision is also the
   * latest one, the "latest" record is rewritten as well. Returns storageEntry.
   *
   * @throws WebinyError (UPDATE_ERROR) when the batch write fails.
   */
  const update = async (model, params) => {
    const {
      originalEntry,
      entry,
      storageEntry
    } = params;
    const partitionKey = (0, _keys.createPartitionKey)(originalEntry);
    const items = [];
    /**
     * We need to:
     * - update the current entry
     * - update the latest entry if the current entry is the latest one
     */

    items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
      PK: partitionKey,
      SK: (0, _keys.createRevisionSortKey)(storageEntry),
      TYPE: createType(),
      GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
      GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
    })));
    /**
     * We need the latest entry to update it as well if neccessary.
     */

    const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);

    if (latestStorageEntry && latestStorageEntry.id === entry.id) {
      items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createLatestSortKey)(),
        TYPE: createLatestType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
        GSI1_SK: (0, _keys.createGSISortKey)(entry)
      })));
    }

    try {
      await (0, _batchWrite.batchWriteAll)({
        table: entity.table,
        items
      });
      dataLoaders.clearAll({
        model
      });
      return storageEntry;
    } catch (ex) {
      throw new _error.default(ex.message || "Could not update entry.", ex.code || "UPDATE_ERROR", {
        error: ex,
        originalEntry,
        entry,
        latestStorageEntry
      });
    }
  };
237
+
238
  /**
   * Deletes an entry completely: queries every record under the entry's
   * partition key (revisions, latest, published) and batch-deletes them all.
   *
   * @throws WebinyError (LOAD_ALL_RECORDS_ERROR) when the initial query fails.
   * @throws WebinyError (DELETE_ENTRY_ERROR) when the batch delete fails.
   */
  const deleteEntry = async (model, params) => {
    const {
      entry
    } = params;
    const queryAllParams = {
      entity,
      partitionKey: (0, _keys.createPartitionKey)(entry),
      options: {
        // gte " " matches every sort key in the partition.
        gte: " "
      }
    };
    let records = [];

    try {
      records = await (0, _query.queryAll)(queryAllParams);
    } catch (ex) {
      throw new _error.default(ex.message || "Could not load all records.", ex.code || "LOAD_ALL_RECORDS_ERROR", {
        error: ex,
        entry
      });
    }

    const items = records.map(item => {
      return entity.deleteBatch({
        PK: item.PK,
        SK: item.SK
      });
    });

    try {
      await (0, _batchWrite.batchWriteAll)({
        table: entity.table,
        items
      });
      dataLoaders.clearAll({
        model
      });
    } catch (ex) {
      throw new _error.default(ex.message || "Could not delete the entry.", ex.code || "DELETE_ENTRY_ERROR", {
        error: ex,
        partitionKey: queryAllParams.partitionKey,
        entry
      });
    }
  };
283
+
284
+ const deleteRevision = async (model, params) => {
285
+ const {
286
+ entryToDelete,
287
+ entryToSetAsLatest,
288
+ storageEntryToSetAsLatest
289
+ } = params;
290
+ const partitionKey = (0, _keys.createPartitionKey)(entryToDelete);
291
+ const items = [entity.deleteBatch({
292
+ PK: partitionKey,
293
+ SK: (0, _keys.createRevisionSortKey)(entryToDelete)
294
+ })];
295
+ const publishedStorageEntry = await getPublishedRevisionByEntryId(model, entryToDelete);
296
+ /**
297
+ * If revision we are deleting is the published one as well, we need to delete those records as well.
298
+ */
299
+
300
+ if (publishedStorageEntry && entryToDelete.id === publishedStorageEntry.id) {
301
+ items.push(entity.deleteBatch({
302
+ PK: partitionKey,
303
+ SK: (0, _keys.createPublishedSortKey)()
304
+ }));
305
+ }
306
+
307
+ if (storageEntryToSetAsLatest) {
308
+ items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntryToSetAsLatest), {}, {
309
+ PK: partitionKey,
310
+ SK: (0, _keys.createLatestSortKey)(),
311
+ TYPE: createLatestType(),
312
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
313
+ GSI1_SK: (0, _keys.createGSISortKey)(storageEntryToSetAsLatest)
314
+ })));
315
+ }
316
+
317
+ try {
318
+ await (0, _batchWrite.batchWriteAll)({
319
+ table: entity.table,
320
+ items
321
+ });
322
+ dataLoaders.clearAll({
323
+ model
324
+ });
325
+ } catch (ex) {
326
+ throw new _error.default(ex.message, ex.code, {
327
+ error: ex,
328
+ entryToDelete,
329
+ entryToSetAsLatest
330
+ });
331
+ }
332
+ };
333
+
334
+ const getAllRevisionsByIds = async (model, params) => {
335
+ return await dataLoaders.getAllEntryRevisions({
336
+ model,
337
+ ids: params.ids
338
+ });
339
+ };
340
+
341
+ const getLatestRevisionByEntryId = async (model, params) => {
342
+ const result = await dataLoaders.getLatestRevisionByEntryId({
343
+ model,
344
+ ids: [params.id]
345
+ });
346
+
347
+ if (result.length === 0) {
348
+ return null;
349
+ }
350
+
351
+ return result.shift();
352
+ };
353
+
354
+ const getPublishedRevisionByEntryId = async (model, params) => {
355
+ const result = await dataLoaders.getPublishedRevisionByEntryId({
356
+ model,
357
+ ids: [params.id]
358
+ });
359
+
360
+ if (result.length === 0) {
361
+ return null;
362
+ }
363
+
364
+ return result.shift();
365
+ };
366
+
367
+ const getRevisionById = async (model, params) => {
368
+ const result = await dataLoaders.getRevisionById({
369
+ model,
370
+ ids: [params.id]
371
+ });
372
+
373
+ if (result.length === 0) {
374
+ return null;
375
+ }
376
+
377
+ return result.shift();
378
+ };
379
+
380
+ const getRevisions = async (model, params) => {
381
+ return await dataLoaders.getAllEntryRevisions({
382
+ model,
383
+ ids: [params.id]
384
+ });
385
+ };
386
+
387
+ const getByIds = async (model, params) => {
388
+ return dataLoaders.getRevisionById({
389
+ model,
390
+ ids: params.ids
391
+ });
392
+ };
393
+
394
+ const getLatestByIds = async (model, params) => {
395
+ return dataLoaders.getLatestRevisionByEntryId({
396
+ model,
397
+ ids: params.ids
398
+ });
399
+ };
400
+
401
+ const getPublishedByIds = async (model, params) => {
402
+ return dataLoaders.getPublishedRevisionByEntryId({
403
+ model,
404
+ ids: params.ids
405
+ });
406
+ };
407
+
408
  /**
   * Finds the newest revision with a version lower than the given one, by
   * querying the partition in reverse order below REV#<zero-padded version>.
   * Returns the cleaned-up record, or whatever queryOne yields when none match.
   *
   * @throws WebinyError (GET_PREVIOUS_VERSION_ERROR) when the query fails.
   */
  const getPreviousRevision = async (model, params) => {
    const {
      tenant,
      locale,
      entryId,
      version
    } = params;
    const queryParams = {
      entity,
      partitionKey: (0, _keys.createPartitionKey)({
        tenant,
        locale,
        id: entryId
      }),
      options: {
        lt: `REV#${(0, _zeroPad.zeroPad)(version)}`,

        /**
         * We need to have extra checks because DynamoDB will return published or latest record if there is no REV# record.
         */
        filters: [{
          attr: "TYPE",
          eq: createType()
        }, {
          attr: "version",
          lt: version
        }],
        reverse: true
      }
    };

    try {
      const result = await (0, _query.queryOne)(queryParams);
      return (0, _cleanup.cleanupItem)(entity, result);
    } catch (ex) {
      throw new _error.default(ex.message || "Could not get previous version of given entry.", ex.code || "GET_PREVIOUS_VERSION_ERROR", _objectSpread(_objectSpread({}, params), {}, {
        error: ex,
        partitionKey: queryParams.partitionKey,
        options: queryParams.options,
        model
      }));
    }
  };
451
+
452
  /**
   * Lists entries of a model. Reads ALL latest (or published, when
   * where.published is set) records from the GSI1 index, then filters, sorts
   * and paginates them in application code. The "cursor" is simply the encoded
   * numeric offset of the next page.
   *
   * @throws WebinyError (QUERY_ENTRIES_ERROR) when the index query fails.
   */
  const list = async (model, params) => {
    const {
      limit: initialLimit = 10,
      where: originalWhere,
      after,
      sort
    } = params;
    // Clamp the page size into (0, 100]; out-of-range values fall back to 100.
    const limit = initialLimit <= 0 || initialLimit >= 100 ? 100 : initialLimit;
    const type = originalWhere.published ? "P" : "L";
    const queryAllParams = {
      entity,
      partitionKey: (0, _keys.createGSIPartitionKey)(model, type),
      options: {
        index: "GSI1",
        // gte " " matches every sort key in the partition.
        gte: " "
      }
    };
    let records = [];

    try {
      records = await (0, _query.queryAll)(queryAllParams);
    } catch (ex) {
      throw new _error.default(ex.message, "QUERY_ENTRIES_ERROR", {
        error: ex,
        partitionKey: queryAllParams.partitionKey,
        options: queryAllParams.options
      });
    }

    if (records.length === 0) {
      return {
        hasMoreItems: false,
        totalCount: 0,
        cursor: null,
        items: []
      };
    }

    const where = _objectSpread({}, originalWhere);

    // These keys are handled by the query itself (record type / partition key),
    // not by the in-memory filter.
    delete where["published"];
    delete where["latest"];
    delete where["locale"];
    delete where["tenant"];
    /**
     * We need a object containing field, transformers and paths.
     * Just build it here and pass on into other methods that require it to avoid mapping multiple times.
     */

    const modelFields = (0, _utils.buildModelFields)({
      plugins,
      model
    });
    /**
     * Filter the read items via the code.
     * It will build the filters out of the where input and transform the values it is using.
     */

    const filteredItems = await (0, _utils.filterItems)({
      items: records,
      where,
      plugins,
      fields: modelFields,
      fromStorage: createStorageTransformCallable(model)
    });
    const totalCount = filteredItems.length;
    /**
     * Sorting is also done via the code.
     * It takes the sort input and sorts by it via the lodash sortBy method.
     */

    const sortedItems = (0, _utils.sortEntryItems)({
      items: filteredItems,
      sort,
      fields: modelFields
    });
    const start = (0, _cursor.decodeCursor)(after) || 0;
    const hasMoreItems = totalCount > start + limit;
    // NOTE(review): this condition can never be true (limit > totalCount + start + limit
    // implies totalCount + start < 0), so `end` is always start + limit. Harmless —
    // Array.prototype.slice tolerates an end past the array length — but likely the
    // intent was `start + limit > totalCount ? undefined : start + limit`. Confirm.
    const end = limit > totalCount + start + limit ? undefined : start + limit;
    const slicedItems = sortedItems.slice(start, end);
    /**
     * Although we do not need a cursor here, we will use it as such to keep it standardized.
     * Number is simply encoded.
     */

    const cursor = totalCount > start + limit ? (0, _cursor.encodeCursor)(start + limit) : null;
    return {
      hasMoreItems,
      totalCount,
      cursor,
      items: (0, _cleanup.cleanupItems)(entity, slicedItems)
    };
  };
545
+
546
+ const get = async (model, params) => {
547
+ const {
548
+ items
549
+ } = await list(model, _objectSpread(_objectSpread({}, params), {}, {
550
+ limit: 1
551
+ }));
552
+
553
+ if (items.length === 0) {
554
+ return null;
555
+ }
556
+
557
+ return items.shift();
558
+ };
559
+
560
+ const requestChanges = async (model, params) => {
561
+ const {
562
+ entry,
563
+ storageEntry,
564
+ originalEntry
565
+ } = params;
566
+ const partitionKey = (0, _keys.createPartitionKey)(entry);
567
+ /**
568
+ * We need to:
569
+ * - update the existing entry
570
+ * - update latest version - if existing entry is the latest version
571
+ */
572
+
573
+ const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
574
+ TYPE: createType(),
575
+ PK: partitionKey,
576
+ SK: (0, _keys.createRevisionSortKey)(entry),
577
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
578
+ GSI1_SK: (0, _keys.createGSISortKey)(entry)
579
+ }))];
580
+ /**
581
+ * We need the latest entry to see if something needs to be updated along side the request changes one.
582
+ */
583
+
584
+ const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
585
+
586
+ if (latestStorageEntry.id === entry.id) {
587
+ items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
588
+ PK: partitionKey,
589
+ SK: (0, _keys.createLatestSortKey)(),
590
+ TYPE: createLatestType(),
591
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
592
+ GSI1_SK: (0, _keys.createGSISortKey)(entry)
593
+ })));
594
+ }
595
+
596
+ try {
597
+ await (0, _batchWrite.batchWriteAll)({
598
+ table: entity.table,
599
+ items
600
+ });
601
+ dataLoaders.clearAll({
602
+ model
603
+ });
604
+ } catch (ex) {
605
+ throw new _error.default(ex.message || "Could not execute the request changes batch.", ex.code || "REQUEST_CHANGES_ERROR", {
606
+ entry,
607
+ originalEntry
608
+ });
609
+ }
610
+
611
+ return entry;
612
+ };
613
+
614
+ const requestReview = async (model, params) => {
615
+ const {
616
+ entry,
617
+ storageEntry,
618
+ originalEntry
619
+ } = params;
620
+ const partitionKey = (0, _keys.createPartitionKey)(entry);
621
+ /**
622
+ * We need to:
623
+ * - update existing entry
624
+ * - update latest entry - if existing entry is the latest entry
625
+ */
626
+
627
+ const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
628
+ TYPE: createType(),
629
+ PK: partitionKey,
630
+ SK: (0, _keys.createRevisionSortKey)(entry),
631
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
632
+ GSI1_SK: (0, _keys.createGSISortKey)(entry)
633
+ }))];
634
+ /**
635
+ * We need the latest entry to see if something needs to be updated along side the request review one.
636
+ */
637
+
638
+ const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
639
+
640
+ if (latestStorageEntry.id === entry.id) {
641
+ items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
642
+ PK: partitionKey,
643
+ SK: (0, _keys.createLatestSortKey)(),
644
+ TYPE: createLatestType(),
645
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
646
+ GSI1_SK: (0, _keys.createGSISortKey)(entry)
647
+ })));
648
+ }
649
+
650
+ try {
651
+ await (0, _batchWrite.batchWriteAll)({
652
+ table: entity.table,
653
+ items
654
+ });
655
+ dataLoaders.clearAll({
656
+ model
657
+ });
658
+ return entry;
659
+ } catch (ex) {
660
+ throw new _error.default(ex.message || "Could not execute request review batch.", ex.code || "REQUEST_REVIEW_ERROR", {
661
+ entry,
662
+ storageEntry,
663
+ originalEntry
664
+ });
665
+ }
666
+ };
667
+
668
+ const publish = async (model, params) => {
669
+ const {
670
+ entry,
671
+ storageEntry
672
+ } = params;
673
+ const partitionKey = (0, _keys.createPartitionKey)(entry);
674
+ /**
675
+ * We need the latest and published entries to see if something needs to be updated along side the publishing one.
676
+ */
677
+
678
+ const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
679
+ const publishedStorageEntry = await getPublishedRevisionByEntryId(model, entry);
680
+ /**
681
+ * We need to update:
682
+ * - current entry revision sort key
683
+ * - published sort key
684
+ * - latest sort key - if entry updated is actually latest
685
+ * - previous published entry to unpublished status - if any previously published entry
686
+ */
687
+
688
+ const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
689
+ PK: partitionKey,
690
+ SK: (0, _keys.createRevisionSortKey)(entry),
691
+ TYPE: createType(),
692
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
693
+ GSI1_SK: (0, _keys.createGSISortKey)(entry)
694
+ })), entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
695
+ PK: partitionKey,
696
+ SK: (0, _keys.createPublishedSortKey)(),
697
+ TYPE: createPublishedType(),
698
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "P"),
699
+ GSI1_SK: (0, _keys.createGSISortKey)(entry)
700
+ }))];
701
+
702
+ if (entry.id === latestStorageEntry.id) {
703
+ items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
704
+ PK: partitionKey,
705
+ SK: (0, _keys.createLatestSortKey)(),
706
+ TYPE: createLatestType(),
707
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
708
+ GSI1_SK: (0, _keys.createGSISortKey)(entry)
709
+ })));
710
+ }
711
+
712
+ if (publishedStorageEntry) {
713
+ items.push(entity.putBatch(_objectSpread(_objectSpread({}, publishedStorageEntry), {}, {
714
+ PK: partitionKey,
715
+ SK: (0, _keys.createRevisionSortKey)(publishedStorageEntry),
716
+ TYPE: createType(),
717
+ status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED,
718
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
719
+ GSI1_SK: (0, _keys.createGSISortKey)(publishedStorageEntry)
720
+ })));
721
+ }
722
+
723
+ try {
724
+ await (0, _batchWrite.batchWriteAll)({
725
+ table: entity.table,
726
+ items
727
+ });
728
+ dataLoaders.clearAll({
729
+ model
730
+ });
731
+ return entry;
732
+ } catch (ex) {
733
+ throw new _error.default(ex.message || "Could not execute the publishing batch.", ex.code || "PUBLISH_ERROR", {
734
+ entry,
735
+ latestStorageEntry,
736
+ publishedStorageEntry
737
+ });
738
+ }
739
+ };
740
+
741
+ const unpublish = async (model, params) => {
742
+ const {
743
+ entry,
744
+ storageEntry
745
+ } = params;
746
+ const partitionKey = (0, _keys.createPartitionKey)(entry);
747
+ /**
748
+ * We need to:
749
+ * - delete currently published entry
750
+ * - update current entry revision with new data
751
+ * - update latest entry status - if entry being unpublished is latest
752
+ */
753
+
754
+ const items = [entity.deleteBatch({
755
+ PK: partitionKey,
756
+ SK: (0, _keys.createPublishedSortKey)()
757
+ }), entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
758
+ PK: partitionKey,
759
+ SK: (0, _keys.createRevisionSortKey)(entry),
760
+ TYPE: createType(),
761
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
762
+ GSI1_SK: (0, _keys.createGSISortKey)(entry)
763
+ }))];
764
+ /**
765
+ * We need the latest entry to see if something needs to be updated along side the unpublishing one.
766
+ */
767
+
768
+ const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
769
+
770
+ if (entry.id === latestStorageEntry.id) {
771
+ items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
772
+ PK: partitionKey,
773
+ SK: (0, _keys.createLatestSortKey)(),
774
+ TYPE: createLatestType(),
775
+ GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
776
+ GSI1_SK: (0, _keys.createGSISortKey)(entry)
777
+ })));
778
+ }
779
+
780
+ try {
781
+ await (0, _batchWrite.batchWriteAll)({
782
+ table: entity.table,
783
+ items
784
+ });
785
+ dataLoaders.clearAll({
786
+ model
787
+ });
788
+ return storageEntry;
789
+ } catch (ex) {
790
+ throw new _error.default(ex.message || "Could not execute unpublish batch.", ex.code || "UNPUBLISH_ERROR", {
791
+ entry,
792
+ storageEntry
793
+ });
794
+ }
795
+ };
796
+
797
  // Public storage-operations interface consumed by @webiny/api-headless-cms.
  return {
    create,
    createRevisionFrom,
    update,
    delete: deleteEntry,
    deleteRevision,
    getPreviousRevision,
    getPublishedByIds,
    getLatestByIds,
    getByIds,
    getRevisionById,
    getPublishedRevisionByEntryId,
    getAllRevisionsByIds,
    getLatestRevisionByEntryId,
    get,
    getRevisions,
    requestChanges,
    requestReview,
    publish,
    list,
    unpublish
  };
};

exports.createEntriesStorageOperations = createEntriesStorageOperations;