@webiny/api-headless-cms-ddb 0.0.0-ee-vpcs.549378cf03
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +25 -0
- package/definitions/entry.d.ts +9 -0
- package/definitions/entry.js +96 -0
- package/definitions/entry.js.map +1 -0
- package/definitions/group.d.ts +9 -0
- package/definitions/group.js +70 -0
- package/definitions/group.js.map +1 -0
- package/definitions/model.d.ts +9 -0
- package/definitions/model.js +97 -0
- package/definitions/model.js.map +1 -0
- package/definitions/settings.d.ts +9 -0
- package/definitions/settings.js +58 -0
- package/definitions/settings.js.map +1 -0
- package/definitions/system.d.ts +9 -0
- package/definitions/system.js +46 -0
- package/definitions/system.js.map +1 -0
- package/definitions/table.d.ts +9 -0
- package/definitions/table.js +30 -0
- package/definitions/table.js.map +1 -0
- package/dynamoDb/index.d.ts +2 -0
- package/dynamoDb/index.js +20 -0
- package/dynamoDb/index.js.map +1 -0
- package/dynamoDb/path/plainObject.d.ts +2 -0
- package/dynamoDb/path/plainObject.js +40 -0
- package/dynamoDb/path/plainObject.js.map +1 -0
- package/dynamoDb/storage/date.d.ts +3 -0
- package/dynamoDb/storage/date.js +109 -0
- package/dynamoDb/storage/date.js.map +1 -0
- package/dynamoDb/storage/longText.d.ts +10 -0
- package/dynamoDb/storage/longText.js +108 -0
- package/dynamoDb/storage/longText.js.map +1 -0
- package/dynamoDb/storage/richText.d.ts +2 -0
- package/dynamoDb/storage/richText.js +113 -0
- package/dynamoDb/storage/richText.js.map +1 -0
- package/dynamoDb/transformValue/datetime.d.ts +5 -0
- package/dynamoDb/transformValue/datetime.js +52 -0
- package/dynamoDb/transformValue/datetime.js.map +1 -0
- package/index.d.ts +3 -0
- package/index.js +150 -0
- package/index.js.map +1 -0
- package/operations/entry/dataLoaders.d.ts +42 -0
- package/operations/entry/dataLoaders.js +321 -0
- package/operations/entry/dataLoaders.js.map +1 -0
- package/operations/entry/filtering/createExpressions.d.ts +26 -0
- package/operations/entry/filtering/createExpressions.js +217 -0
- package/operations/entry/filtering/createExpressions.js.map +1 -0
- package/operations/entry/filtering/createFields.d.ts +14 -0
- package/operations/entry/filtering/createFields.js +123 -0
- package/operations/entry/filtering/createFields.js.map +1 -0
- package/operations/entry/filtering/extractSort.d.ts +13 -0
- package/operations/entry/filtering/extractSort.js +55 -0
- package/operations/entry/filtering/extractSort.js.map +1 -0
- package/operations/entry/filtering/filter.d.ts +15 -0
- package/operations/entry/filtering/filter.js +178 -0
- package/operations/entry/filtering/filter.js.map +1 -0
- package/operations/entry/filtering/fullTextSearch.d.ts +14 -0
- package/operations/entry/filtering/fullTextSearch.js +60 -0
- package/operations/entry/filtering/fullTextSearch.js.map +1 -0
- package/operations/entry/filtering/getValue.d.ts +5 -0
- package/operations/entry/filtering/getValue.js +81 -0
- package/operations/entry/filtering/getValue.js.map +1 -0
- package/operations/entry/filtering/index.d.ts +2 -0
- package/operations/entry/filtering/index.js +21 -0
- package/operations/entry/filtering/index.js.map +1 -0
- package/operations/entry/filtering/mapPlugins.d.ts +8 -0
- package/operations/entry/filtering/mapPlugins.js +39 -0
- package/operations/entry/filtering/mapPlugins.js.map +1 -0
- package/operations/entry/filtering/plugins/defaultFilterCreate.d.ts +2 -0
- package/operations/entry/filtering/plugins/defaultFilterCreate.js +48 -0
- package/operations/entry/filtering/plugins/defaultFilterCreate.js.map +1 -0
- package/operations/entry/filtering/plugins/index.d.ts +1 -0
- package/operations/entry/filtering/plugins/index.js +18 -0
- package/operations/entry/filtering/plugins/index.js.map +1 -0
- package/operations/entry/filtering/plugins/objectFilterCreate.d.ts +2 -0
- package/operations/entry/filtering/plugins/objectFilterCreate.js +107 -0
- package/operations/entry/filtering/plugins/objectFilterCreate.js.map +1 -0
- package/operations/entry/filtering/plugins/refFilterCreate.d.ts +2 -0
- package/operations/entry/filtering/plugins/refFilterCreate.js +89 -0
- package/operations/entry/filtering/plugins/refFilterCreate.js.map +1 -0
- package/operations/entry/filtering/sort.d.ts +9 -0
- package/operations/entry/filtering/sort.js +80 -0
- package/operations/entry/filtering/sort.js.map +1 -0
- package/operations/entry/filtering/systemFields.d.ts +4 -0
- package/operations/entry/filtering/systemFields.js +72 -0
- package/operations/entry/filtering/systemFields.js.map +1 -0
- package/operations/entry/filtering/transform.d.ts +6 -0
- package/operations/entry/filtering/transform.js +19 -0
- package/operations/entry/filtering/transform.js.map +1 -0
- package/operations/entry/filtering/types.d.ts +40 -0
- package/operations/entry/filtering/types.js +5 -0
- package/operations/entry/filtering/types.js.map +1 -0
- package/operations/entry/filtering/values.d.ts +2 -0
- package/operations/entry/filtering/values.js +28 -0
- package/operations/entry/filtering/values.js.map +1 -0
- package/operations/entry/filtering/where.d.ts +5 -0
- package/operations/entry/filtering/where.js +38 -0
- package/operations/entry/filtering/where.js.map +1 -0
- package/operations/entry/index.d.ts +8 -0
- package/operations/entry/index.js +872 -0
- package/operations/entry/index.js.map +1 -0
- package/operations/entry/keys.d.ts +25 -0
- package/operations/entry/keys.js +73 -0
- package/operations/entry/keys.js.map +1 -0
- package/operations/entry/systemFields.d.ts +2 -0
- package/operations/entry/systemFields.js +74 -0
- package/operations/entry/systemFields.js.map +1 -0
- package/operations/group/index.d.ts +9 -0
- package/operations/group/index.js +192 -0
- package/operations/group/index.js.map +1 -0
- package/operations/model/index.d.ts +7 -0
- package/operations/model/index.js +162 -0
- package/operations/model/index.js.map +1 -0
- package/operations/settings/index.d.ts +7 -0
- package/operations/settings/index.js +135 -0
- package/operations/settings/index.js.map +1 -0
- package/operations/system/index.d.ts +7 -0
- package/operations/system/index.js +99 -0
- package/operations/system/index.js.map +1 -0
- package/package.json +60 -0
- package/plugins/CmsEntryFieldFilterPathPlugin.d.ts +22 -0
- package/plugins/CmsEntryFieldFilterPathPlugin.js +55 -0
- package/plugins/CmsEntryFieldFilterPathPlugin.js.map +1 -0
- package/plugins/CmsEntryFieldFilterPlugin.d.ts +43 -0
- package/plugins/CmsEntryFieldFilterPlugin.js +31 -0
- package/plugins/CmsEntryFieldFilterPlugin.js.map +1 -0
- package/plugins/index.d.ts +1 -0
- package/plugins/index.js +18 -0
- package/plugins/index.js.map +1 -0
- package/types.d.ts +53 -0
- package/types.js +16 -0
- package/types.js.map +1 -0
|
@@ -0,0 +1,872 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
|
|
3
|
+
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault").default;
|
|
4
|
+
|
|
5
|
+
Object.defineProperty(exports, "__esModule", {
|
|
6
|
+
value: true
|
|
7
|
+
});
|
|
8
|
+
exports.createEntriesStorageOperations = void 0;
|
|
9
|
+
|
|
10
|
+
var _objectSpread2 = _interopRequireDefault(require("@babel/runtime/helpers/objectSpread2"));
|
|
11
|
+
|
|
12
|
+
var _error = _interopRequireDefault(require("@webiny/error"));
|
|
13
|
+
|
|
14
|
+
var _dataLoaders = require("./dataLoaders");
|
|
15
|
+
|
|
16
|
+
var _types = require("@webiny/api-headless-cms/types");
|
|
17
|
+
|
|
18
|
+
var _keys = require("./keys");
|
|
19
|
+
|
|
20
|
+
var _batchWrite = require("@webiny/db-dynamodb/utils/batchWrite");
|
|
21
|
+
|
|
22
|
+
var _query = require("@webiny/db-dynamodb/utils/query");
|
|
23
|
+
|
|
24
|
+
var _cleanup = require("@webiny/db-dynamodb/utils/cleanup");
|
|
25
|
+
|
|
26
|
+
var _cursor = require("@webiny/utils/cursor");
|
|
27
|
+
|
|
28
|
+
var _zeroPad = require("@webiny/utils/zeroPad");
|
|
29
|
+
|
|
30
|
+
var _apiHeadlessCms = require("@webiny/api-headless-cms");
|
|
31
|
+
|
|
32
|
+
var _createFields = require("./filtering/createFields");
|
|
33
|
+
|
|
34
|
+
var _filtering = require("./filtering");
|
|
35
|
+
|
|
36
|
+
/**
 * Base DynamoDB record TYPE for a CMS entry revision record.
 */
const createType = () => "cms.entry";
|
|
39
|
+
|
|
40
|
+
/**
 * Record TYPE marking the "latest revision" record of an entry.
 */
const createLatestType = () => `${createType()}.l`;
|
|
43
|
+
|
|
44
|
+
/**
 * Record TYPE marking the "published revision" record of an entry.
 */
const createPublishedType = () => `${createType()}.p`;
|
|
47
|
+
|
|
48
|
+
/**
 * Convert an entry's value keys (field ids) into their storage representation.
 * Returns a shallow copy of the given storage entry with the converted values;
 * the input object is not mutated.
 */
const convertToStorageEntry = params => {
  const { model, storageEntry } = params;
  const convertedValues = model.convertValueKeyToStorage({
    fields: model.fields,
    values: storageEntry.values
  });
  return {
    ...storageEntry,
    values: convertedValues
  };
};
|
|
61
|
+
|
|
62
|
+
/**
 * Convert an entry's value keys from their storage representation back into
 * field ids. Returns a shallow copy of the given storage entry with the
 * converted values; the input object is not mutated.
 */
const convertFromStorageEntry = params => {
  const { model, storageEntry } = params;
  const convertedValues = model.convertValueKeyFromStorage({
    fields: model.fields,
    values: storageEntry.values
  });
  return {
    ...storageEntry,
    values: convertedValues
  };
};
|
|
75
|
+
|
|
76
|
+
const createEntriesStorageOperations = params => {
|
|
77
|
+
// Storage-operations context: the DynamoDB Toolbox entity used for every
// read/write below, and the plugin container used for lookups.
const {
  entity,
  plugins
} = params;
// Per-request data loaders that batch and cache entry reads against the entity.
// Cleared after every write (see clearAll calls in the operations below).
const dataLoaders = new _dataLoaders.DataLoadersHandler({
  entity
});
// Index StorageTransformPlugin instances by field type for O(1) lookup when
// transforming stored values back into their runtime representation.
const storageTransformPlugins = plugins.byType(_apiHeadlessCms.StorageTransformPlugin.type).reduce((collection, plugin) => {
  collection[plugin.fieldType] = plugin;
  return collection;
}, {});
|
|
88
|
+
|
|
89
|
+
/**
 * Build a (field, value) => transformedValue callable for the given model.
 * Looks up the StorageTransformPlugin registered for the field's type and runs
 * its fromStorage transform; when no plugin exists for the type, the value is
 * returned untouched.
 */
const createStorageTransformCallable = model => (field, value) => {
  const transformPlugin = storageTransformPlugins[field.type];
  if (!transformPlugin) {
    return value;
  }
  // Nested fields resolve their own plugin via this lookup; "*" is the
  // registered catch-all.
  const getStoragePlugin = fieldType => {
    return storageTransformPlugins[fieldType] || storageTransformPlugins["*"];
  };
  return transformPlugin.fromStorage({
    model,
    field,
    value,
    getStoragePlugin,
    plugins
  });
};
|
|
110
|
+
|
|
111
|
+
/**
 * Create a brand-new entry (first revision).
 * Writes, in a single batch:
 *  - the revision record (REV# sort key, TYPE cms.entry)
 *  - the latest record (L sort key, TYPE cms.entry.l)
 *  - the published record (P sort key, TYPE cms.entry.p) when the entry is
 *    created directly in the "published" status
 * Returns the caller-provided storage entry unchanged.
 *
 * @param model  CMS model the entry belongs to (supplies tenant/locale/fields).
 * @param params { entry, storageEntry } — runtime entry plus its storage form.
 * @throws WebinyError ("CREATE_ENTRY_ERROR") when the batch write fails.
 */
const create = async (model, params) => {
  const {
    entry,
    storageEntry: initialStorageEntry
  } = params;
  const partitionKey = (0, _keys.createPartitionKey)({
    id: entry.id,
    locale: model.locale,
    tenant: model.tenant
  });
  const isPublished = entry.status === "published";
  // A published revision is always locked, regardless of the input flag.
  const locked = isPublished ? true : entry.locked;
  const storageEntry = convertToStorageEntry({
    model,
    storageEntry: initialStorageEntry
  });
  /**
   * We need to:
   * - create the new main entry item
   * - create/update the latest entry item
   */
  const items = [entity.putBatch({
    ...storageEntry,
    locked,
    PK: partitionKey,
    SK: (0, _keys.createRevisionSortKey)(entry),
    TYPE: createType(),
    GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
    GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
  }), entity.putBatch({
    ...storageEntry,
    locked,
    PK: partitionKey,
    SK: (0, _keys.createLatestSortKey)(),
    TYPE: createLatestType(),
    GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
    GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
  })];

  /**
   * We also need to create the published record when the entry is created
   * already published.
   */
  if (isPublished) {
    items.push(entity.putBatch({
      ...storageEntry,
      locked,
      PK: partitionKey,
      SK: (0, _keys.createPublishedSortKey)(),
      // FIX: the published record was previously written with the "latest"
      // TYPE (cms.entry.l). It must carry the published TYPE (cms.entry.p),
      // consistent with the published branch of update().
      TYPE: createPublishedType(),
      GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "P"),
      GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
    }));
  }

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items
    });
    // Cached loader results are stale after any write.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not insert data into the DynamoDB.", ex.code || "CREATE_ENTRY_ERROR", {
      error: ex,
      entry
    });
  }

  return initialStorageEntry;
};
|
|
180
|
+
|
|
181
|
+
/**
 * Persist a new revision created from an existing entry.
 * Writes two records in a single batch:
 *  - the new revision record (REV# sort key)
 *  - the latest record (L sort key), which now points at the new revision
 * Returns the caller-provided storage entry unchanged.
 */
const createRevisionFrom = async (model, params) => {
  const {
    entry,
    storageEntry: initialStorageEntry
  } = params;
  const partitionKey = (0, _keys.createPartitionKey)({
    id: entry.id,
    locale: model.locale,
    tenant: model.tenant
  });
  // Convert value keys (field ids) into their storage representation.
  const storageEntry = convertToStorageEntry({
    storageEntry: initialStorageEntry,
    model
  });
  /**
   * We need to:
   * - create the main entry item
   * - update the last entry item to a current one
   */

  const items = [entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
    PK: partitionKey,
    SK: (0, _keys.createRevisionSortKey)(storageEntry),
    TYPE: createType(),
    GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
    GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
  })), entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
    PK: partitionKey,
    SK: (0, _keys.createLatestSortKey)(),
    TYPE: createLatestType(),
    GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
    GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
  }))];

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items
    });
    // Cached loader results are stale after any write.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not create revision from given entry.", ex.code || "CREATE_REVISION_ERROR", {
      error: ex,
      entry,
      storageEntry
    });
  }
  /**
   * There are no modifications on the entry created so just return the data.
   */

  return initialStorageEntry;
};
|
|
237
|
+
|
|
238
|
+
/**
 * Update an existing revision in place.
 * Writes, in a single batch:
 *  - the revision record itself (REV# sort key)
 *  - the published record (P sort key) when the entry is published
 *  - the latest record (L sort key) when this revision is the latest one
 * Returns the caller-provided storage entry unchanged.
 */
const update = async (model, params) => {
  const {
    entry,
    storageEntry: initialStorageEntry
  } = params;
  const partitionKey = (0, _keys.createPartitionKey)({
    id: entry.id,
    locale: model.locale,
    tenant: model.tenant
  });
  const isPublished = entry.status === "published";
  // A published revision is always locked, regardless of the input flag.
  const locked = isPublished ? true : entry.locked;
  const items = [];
  const storageEntry = convertToStorageEntry({
    model,
    storageEntry: initialStorageEntry
  });
  /**
   * We need to:
   * - update the current entry
   * - update the latest entry if the current entry is the latest one
   */

  items.push(entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
    locked,
    PK: partitionKey,
    SK: (0, _keys.createRevisionSortKey)(storageEntry),
    TYPE: createType(),
    GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
    GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
  })));

  if (isPublished) {
    items.push(entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
      locked,
      PK: partitionKey,
      SK: (0, _keys.createPublishedSortKey)(),
      TYPE: createPublishedType(),
      GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "P"),
      GSI1_SK: (0, _keys.createGSISortKey)(storageEntry)
    })));
  }
  /**
   * We need the latest entry to update it as well if necessary.
   */

  const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);

  if (latestStorageEntry && latestStorageEntry.id === entry.id) {
    items.push(entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
      locked,
      PK: partitionKey,
      SK: (0, _keys.createLatestSortKey)(),
      TYPE: createLatestType(),
      GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
      // NOTE(review): the other branches derive GSI1_SK from storageEntry;
      // this one uses the raw entry — presumably equivalent for the key
      // fields involved, but worth confirming.
      GSI1_SK: (0, _keys.createGSISortKey)(entry)
    })));
  }

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items
    });
    // Cached loader results are stale after any write.
    dataLoaders.clearAll({
      model
    });
    return initialStorageEntry;
  } catch (ex) {
    throw new _error.default(ex.message || "Could not update entry.", ex.code || "UPDATE_ERROR", {
      error: ex,
      entry,
      latestStorageEntry
    });
  }
};
|
|
315
|
+
|
|
316
|
+
/**
 * Delete an entry and every record belonging to it (all revisions plus the
 * latest and published records). Queries the whole partition and batch-deletes
 * each returned item.
 */
const deleteEntry = async (model, params) => {
  const {
    entry
  } = params;
  const queryAllParams = {
    entity,
    partitionKey: (0, _keys.createPartitionKey)({
      id: entry.id,
      locale: model.locale,
      tenant: model.tenant
    }),
    options: {
      // Match every sort key (" " sorts before all printable characters).
      gte: " "
    }
  };
  let records = [];

  try {
    records = await (0, _query.queryAll)(queryAllParams);
  } catch (ex) {
    throw new _error.default(ex.message || "Could not load all records.", ex.code || "LOAD_ALL_RECORDS_ERROR", {
      error: ex,
      entry
    });
  }

  // One delete per record found under the partition key.
  const items = records.map(item => {
    return entity.deleteBatch({
      PK: item.PK,
      SK: item.SK
    });
  });

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items
    });
    // Cached loader results are stale after any write.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not delete the entry.", ex.code || "DELETE_ENTRY_ERROR", {
      error: ex,
      partitionKey: queryAllParams.partitionKey,
      entry
    });
  }
};
|
|
365
|
+
|
|
366
|
+
/**
 * Delete a single revision of an entry.
 * Removes the revision record; when the deleted revision is also the currently
 * published one, removes the published record too; when a latest storage entry
 * is supplied, rewrites the latest record to point at it.
 */
const deleteRevision = async (model, params) => {
  const {
    entry,
    latestEntry,
    latestStorageEntry: initialLatestStorageEntry
  } = params;
  const partitionKey = (0, _keys.createPartitionKey)({
    id: entry.id,
    locale: model.locale,
    tenant: model.tenant
  });
  const items = [entity.deleteBatch({
    PK: partitionKey,
    SK: (0, _keys.createRevisionSortKey)(entry)
  })];
  const publishedStorageEntry = await getPublishedRevisionByEntryId(model, entry);
  /**
   * If revision we are deleting is the published one as well, we need to delete those records as well.
   */

  if (publishedStorageEntry && entry.id === publishedStorageEntry.id) {
    items.push(entity.deleteBatch({
      PK: partitionKey,
      SK: (0, _keys.createPublishedSortKey)()
    }));
  }

  if (initialLatestStorageEntry) {
    const latestStorageEntry = convertToStorageEntry({
      storageEntry: initialLatestStorageEntry,
      model
    });
    // Repoint the latest record at the surviving revision.
    items.push(entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, latestStorageEntry), {}, {
      PK: partitionKey,
      SK: (0, _keys.createLatestSortKey)(),
      TYPE: createLatestType(),
      GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
      GSI1_SK: (0, _keys.createGSISortKey)(latestStorageEntry)
    })));
  }

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items
    });
    // Cached loader results are stale after any write.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    // NOTE(review): unlike the sibling operations there are no fallback
    // message/code values here, so ex.message / ex.code may be undefined —
    // confirm whether that is intended.
    throw new _error.default(ex.message, ex.code, {
      error: ex,
      entry,
      latestEntry
    });
  }
};
|
|
423
|
+
|
|
424
|
+
/**
 * Load the latest revision record for a single entry id.
 * Returns the entry converted out of its storage form, or null when the
 * loader finds nothing.
 */
const getLatestRevisionByEntryId = async (model, params) => {
  const results = await dataLoaders.getLatestRevisionByEntryId({
    model,
    ids: [params.id]
  });
  const [storageEntry] = results;
  if (!storageEntry) {
    return null;
  }
  return convertFromStorageEntry({
    storageEntry,
    model
  });
};
|
|
440
|
+
|
|
441
|
+
/**
 * Load the published revision record for a single entry id.
 * Returns the entry converted out of its storage form, or null when the
 * loader finds nothing.
 */
const getPublishedRevisionByEntryId = async (model, params) => {
  const results = await dataLoaders.getPublishedRevisionByEntryId({
    model,
    ids: [params.id]
  });
  const [storageEntry] = results;
  if (!storageEntry) {
    return null;
  }
  return convertFromStorageEntry({
    storageEntry,
    model
  });
};
|
|
457
|
+
|
|
458
|
+
/**
 * Load one exact revision by its full revision id.
 * Returns the entry converted out of its storage form, or null when the
 * loader finds nothing.
 */
const getRevisionById = async (model, params) => {
  const results = await dataLoaders.getRevisionById({
    model,
    ids: [params.id]
  });
  const [storageEntry] = results;
  if (!storageEntry) {
    return null;
  }
  return convertFromStorageEntry({
    storageEntry,
    model
  });
};
|
|
474
|
+
|
|
475
|
+
/**
 * Load every revision of a single entry, each converted out of its
 * storage form.
 */
const getRevisions = async (model, params) => {
  const storageItems = await dataLoaders.getAllEntryRevisions({
    model,
    ids: [params.id]
  });
  return storageItems.map(storageEntry => convertFromStorageEntry({
    storageEntry,
    model
  }));
};
|
|
487
|
+
|
|
488
|
+
/**
 * Load multiple exact revisions by their full revision ids, each converted
 * out of its storage form.
 */
const getByIds = async (model, params) => {
  const storageItems = await dataLoaders.getRevisionById({
    model,
    ids: params.ids
  });
  return storageItems.map(storageEntry => convertFromStorageEntry({
    storageEntry,
    model
  }));
};
|
|
500
|
+
|
|
501
|
+
/**
 * Load the latest revisions for multiple entry ids, each converted out of
 * its storage form.
 */
const getLatestByIds = async (model, params) => {
  const storageItems = await dataLoaders.getLatestRevisionByEntryId({
    model,
    ids: params.ids
  });
  return storageItems.map(storageEntry => convertFromStorageEntry({
    storageEntry,
    model
  }));
};
|
|
513
|
+
|
|
514
|
+
/**
 * Load the published revisions for multiple entry ids, each converted out of
 * its storage form.
 */
const getPublishedByIds = async (model, params) => {
  const storageItems = await dataLoaders.getPublishedRevisionByEntryId({
    model,
    ids: params.ids
  });
  return storageItems.map(storageEntry => convertFromStorageEntry({
    storageEntry,
    model
  }));
};
|
|
526
|
+
|
|
527
|
+
/**
 * Fetch the revision immediately preceding the given version of an entry.
 * Queries the entry's partition in reverse order for the newest REV# record
 * whose version is lower than the supplied one; returns null when none exists.
 */
const getPreviousRevision = async (model, params) => {
  const {
    entryId,
    version
  } = params;
  const queryParams = {
    entity,
    partitionKey: (0, _keys.createPartitionKey)({
      tenant: model.tenant,
      locale: model.locale,
      id: entryId
    }),
    options: {
      // Sort keys are zero-padded so the lexicographic "lt" bound matches the
      // numeric version ordering.
      lt: `REV#${(0, _zeroPad.zeroPad)(version)}`,

      /**
       * We need to have extra checks because DynamoDB will return published or latest record if there is no REV# record.
       */
      filters: [{
        attr: "TYPE",
        eq: createType()
      }, {
        attr: "version",
        lt: version
      }],
      reverse: true
    }
  };

  try {
    const result = await (0, _query.queryOne)(queryParams);
    const storageEntry = (0, _cleanup.cleanupItem)(entity, result);

    if (!storageEntry) {
      return null;
    }

    return convertFromStorageEntry({
      storageEntry,
      model
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not get previous version of given entry.", ex.code || "GET_PREVIOUS_VERSION_ERROR", (0, _objectSpread2.default)((0, _objectSpread2.default)({}, params), {}, {
      error: ex,
      partitionKey: queryParams.partitionKey,
      options: queryParams.options,
      model
    }));
  }
};
|
|
577
|
+
|
|
578
|
+
/**
 * List entries of a model.
 * Reads ALL latest (or published, when where.published is set) records of the
 * model via the GSI1 index, then filters, sorts and paginates them in code.
 * Returns { hasMoreItems, totalCount, cursor, items }.
 */
const list = async (model, params) => {
  const {
    limit: initialLimit = 10,
    where: initialWhere,
    after,
    sort: sortBy,
    fields,
    search
  } = params;
  // Clamp the limit: non-positive or oversized values read the 10000 maximum.
  const limit = initialLimit <= 0 || initialLimit >= 10000 ? 10000 : initialLimit;
  // "P" GSI partition holds published records, "L" holds latest ones.
  const type = initialWhere.published ? "P" : "L";
  const queryAllParams = {
    entity,
    partitionKey: (0, _keys.createGSIPartitionKey)(model, type),
    options: {
      index: "GSI1",
      // Match every sort key (" " sorts before all printable characters).
      gte: " "
    }
  };
  let storageEntries = [];

  try {
    storageEntries = await (0, _query.queryAll)(queryAllParams);
  } catch (ex) {
    throw new _error.default(ex.message, "QUERY_ENTRIES_ERROR", {
      error: ex,
      partitionKey: queryAllParams.partitionKey,
      options: queryAllParams.options
    });
  }

  if (storageEntries.length === 0) {
    return {
      hasMoreItems: false,
      totalCount: 0,
      cursor: null,
      items: []
    };
  }

  // published/latest only selected the partition above; they are not
  // per-field filters, so strip them before filtering.
  const where = (0, _objectSpread2.default)({}, initialWhere);
  delete where["published"];
  delete where["latest"];
  /**
   * We need an object containing field, transformers and paths.
   * Just build it here and pass on into other methods that require it to avoid mapping multiple times.
   */

  const modelFields = (0, _createFields.createFields)({
    plugins,
    fields: model.fields
  });
  const fromStorage = createStorageTransformCallable(model);
  /**
   * Let's transform records from storage ones to regular ones, so we do not need to do it later.
   *
   * This is always being done, but at least its in parallel.
   */

  const records = await Promise.all(storageEntries.map(async storageEntry => {
    const entry = convertFromStorageEntry({
      storageEntry,
      model
    });

    for (const field of model.fields) {
      entry.values[field.fieldId] = await fromStorage(field, entry.values[field.fieldId]);
    }

    return entry;
  }));
  /**
   * Filter the read items via the code.
   * It will build the filters out of the where input and transform the values it is using.
   */

  const filteredItems = (0, _filtering.filter)({
    items: records,
    where,
    plugins,
    fields: modelFields,
    fullTextSearch: {
      term: search,
      fields: fields || []
    }
  });
  const totalCount = filteredItems.length;
  /**
   * Sorting is also done via the code.
   * It takes the sort input and sorts by it via the lodash sortBy method.
   */

  const sortedItems = (0, _filtering.sort)({
    items: filteredItems,
    sort: sortBy,
    fields: modelFields
  });
  // The cursor is simply the encoded numeric offset into the sorted result set.
  const start = parseInt((0, _cursor.decodeCursor)(after) || "0") || 0;
  const hasMoreItems = totalCount > start + limit;
  // NOTE(review): "limit > totalCount + start + limit" can never be true for
  // non-negative counts, so `end` is effectively always `start + limit` —
  // confirm whether a different bound was intended here.
  const end = limit > totalCount + start + limit ? undefined : start + limit;
  const slicedItems = sortedItems.slice(start, end);
  /**
   * Although we do not need a cursor here, we will use it as such to keep it standardized.
   * Number is simply encoded.
   */

  const cursor = totalCount > start + limit ? (0, _cursor.encodeCursor)(`${start + limit}`) : null;
  return {
    hasMoreItems,
    totalCount,
    cursor,
    items: (0, _cleanup.cleanupItems)(entity, slicedItems)
  };
};
|
|
692
|
+
|
|
693
|
+
/**
 * Fetch the single entry matching the given listing params, or null when
 * nothing matches. Delegates to list() with the limit forced to 1.
 */
const get = async (model, params) => {
  const result = await list(model, {
    ...params,
    limit: 1
  });
  const [first] = result.items;
  return first || null;
};
|
|
701
|
+
|
|
702
|
+
/**
 * Publish a single entry revision.
 *
 * Builds one DynamoDB batch that writes:
 *  - the revision record itself (GSI partition "A") with the published data;
 *  - the "published" record (sort key from `createPublishedSortKey`, GSI partition "P");
 *  - the "latest" record (GSI partition "L") — only when the revision being
 *    published is also the latest one;
 *  - the previously published revision rewritten with UNPUBLISHED status — only
 *    when a different revision was published before.
 *
 * @param model CMS model; supplies `tenant` and `locale` for key construction.
 * @param params Object carrying `entry` (entry being published) and
 *               `storageEntry` (its storage-level representation).
 * @returns The `storageEntry` received in `params`, unchanged.
 * @throws Wrapped error with code "PUBLISH_ERROR" (or the underlying code) when
 *         the batch write fails.
 */
const publish = async (model, params) => {
    const {
        entry,
        storageEntry: initialStorageEntry
    } = params;
    const partitionKey = (0, _keys.createPartitionKey)({
        id: entry.id,
        locale: model.locale,
        tenant: model.tenant
    });
    /**
     * We need the latest and published entries to see if something needs to be
     * updated alongside the publishing one. The two lookups are independent of
     * each other, so run them in parallel instead of awaiting sequentially.
     */
    const [initialLatestStorageEntry, initialPublishedStorageEntry] = await Promise.all([
        getLatestRevisionByEntryId(model, entry),
        getPublishedRevisionByEntryId(model, entry)
    ]);
    const storageEntry = convertToStorageEntry({
        model,
        storageEntry: initialStorageEntry
    });
    /**
     * We need to update:
     * - current entry revision sort key
     * - published sort key
     * - latest sort key - if entry updated is actually latest
     * - previous published entry to unpublished status - if any previously published entry
     */
    const items = [entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(entry),
        TYPE: createType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
        GSI1_SK: (0, _keys.createGSISortKey)(entry)
    })), entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createPublishedSortKey)(),
        TYPE: createPublishedType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "P"),
        GSI1_SK: (0, _keys.createGSISortKey)(entry)
    }))];

    // Rewrite the "latest" record only when the published revision is the latest one.
    if (initialLatestStorageEntry && entry.id === initialLatestStorageEntry.id) {
        items.push(entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        })));
    }

    // If a different revision was published before, flip its status to UNPUBLISHED.
    if (initialPublishedStorageEntry && initialPublishedStorageEntry.id !== entry.id) {
        const publishedStorageEntry = convertToStorageEntry({
            storageEntry: initialPublishedStorageEntry,
            model
        });
        items.push(entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, publishedStorageEntry), {}, {
            PK: partitionKey,
            SK: (0, _keys.createRevisionSortKey)(publishedStorageEntry),
            TYPE: createType(),
            status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED,
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
            GSI1_SK: (0, _keys.createGSISortKey)(publishedStorageEntry)
        })));
    }

    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items
        });
        // Cached reads for this model are now stale — drop them.
        dataLoaders.clearAll({
            model
        });
        return initialStorageEntry;
    } catch (ex) {
        throw new _error.default(ex.message || "Could not execute the publishing batch.", ex.code || "PUBLISH_ERROR", {
            entry,
            latestStorageEntry: initialLatestStorageEntry,
            publishedStorageEntry: initialPublishedStorageEntry
        });
    }
};
|
|
786
|
+
|
|
787
|
+
/**
 * Unpublish the currently published revision of an entry: delete the
 * "published" record, rewrite the revision record with the new data, and —
 * when the revision is also the latest one — rewrite the "latest" record.
 * All writes go out as a single batch; dataloader caches for the model are
 * cleared on success.
 */
const unpublish = async (model, params) => {
    const entry = params.entry;
    const initialStorageEntry = params.storageEntry;
    const partitionKey = (0, _keys.createPartitionKey)({
        id: entry.id,
        locale: model.locale,
        tenant: model.tenant
    });
    const storageEntry = convertToStorageEntry({
        storageEntry: initialStorageEntry,
        model
    });
    /**
     * We need to:
     * - delete currently published entry
     * - update current entry revision with new data
     * - update latest entry status - if entry being unpublished is latest
     */
    const batchItems = [];
    batchItems.push(entity.deleteBatch({
        PK: partitionKey,
        SK: (0, _keys.createPublishedSortKey)()
    }));
    batchItems.push(entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
        PK: partitionKey,
        SK: (0, _keys.createRevisionSortKey)(entry),
        TYPE: createType(),
        GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "A"),
        GSI1_SK: (0, _keys.createGSISortKey)(entry)
    })));
    /**
     * We need the latest entry to see if something needs to be updated along
     * side the unpublishing one.
     */
    const latestStorageEntry = await getLatestRevisionByEntryId(model, entry);
    const isLatestRevision = latestStorageEntry ? latestStorageEntry.id === entry.id : false;
    if (isLatestRevision) {
        batchItems.push(entity.putBatch((0, _objectSpread2.default)((0, _objectSpread2.default)({}, storageEntry), {}, {
            PK: partitionKey,
            SK: (0, _keys.createLatestSortKey)(),
            TYPE: createLatestType(),
            GSI1_PK: (0, _keys.createGSIPartitionKey)(model, "L"),
            GSI1_SK: (0, _keys.createGSISortKey)(entry)
        })));
    }
    try {
        await (0, _batchWrite.batchWriteAll)({
            table: entity.table,
            items: batchItems
        });
        // Cached reads for this model are now stale — drop them.
        dataLoaders.clearAll({
            model
        });
        return initialStorageEntry;
    } catch (ex) {
        throw new _error.default(ex.message || "Could not execute unpublish batch.", ex.code || "UNPUBLISH_ERROR", {
            entry,
            storageEntry
        });
    }
};
|
|
850
|
+
|
|
851
|
+
/**
 * Public interface of the entries storage operations built by this factory.
 * Each key maps to one of the handler closures defined above; `delete` is the
 * reserved-word-safe alias for `deleteEntry`.
 */
return {
    create,
    createRevisionFrom,
    update,
    delete: deleteEntry,
    deleteRevision,
    getPreviousRevision,
    getPublishedByIds,
    getLatestByIds,
    getByIds,
    getRevisionById,
    getPublishedRevisionByEntryId,
    getLatestRevisionByEntryId,
    get,
    getRevisions,
    publish,
    list,
    unpublish
};
}; // end of the createEntriesStorageOperations factory
|
|
871
|
+
|
|
872
|
+
// Named CommonJS export of the storage-operations factory.
exports.createEntriesStorageOperations = createEntriesStorageOperations;
|