@webiny/api-headless-cms-ddb-es 0.0.0-mt-1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +35 -0
- package/configurations.d.ts +12 -0
- package/configurations.js +32 -0
- package/definitions/entry.d.ts +8 -0
- package/definitions/entry.js +91 -0
- package/definitions/entryElasticsearch.d.ts +8 -0
- package/definitions/entryElasticsearch.js +46 -0
- package/definitions/group.d.ts +8 -0
- package/definitions/group.js +74 -0
- package/definitions/model.d.ts +8 -0
- package/definitions/model.js +96 -0
- package/definitions/settings.d.ts +8 -0
- package/definitions/settings.js +62 -0
- package/definitions/system.d.ts +8 -0
- package/definitions/system.js +50 -0
- package/definitions/table.d.ts +8 -0
- package/definitions/table.js +24 -0
- package/definitions/tableElasticsearch.d.ts +8 -0
- package/definitions/tableElasticsearch.js +24 -0
- package/dynamoDb/index.d.ts +2 -0
- package/dynamoDb/index.js +18 -0
- package/dynamoDb/storage/date.d.ts +3 -0
- package/dynamoDb/storage/date.js +65 -0
- package/dynamoDb/storage/longText.d.ts +7 -0
- package/dynamoDb/storage/longText.js +83 -0
- package/dynamoDb/storage/richText.d.ts +8 -0
- package/dynamoDb/storage/richText.js +110 -0
- package/elasticsearch/index.d.ts +2 -0
- package/elasticsearch/index.js +16 -0
- package/elasticsearch/indexing/dateTimeIndexing.d.ts +3 -0
- package/elasticsearch/indexing/dateTimeIndexing.js +89 -0
- package/elasticsearch/indexing/defaultFieldIndexing.d.ts +3 -0
- package/elasticsearch/indexing/defaultFieldIndexing.js +47 -0
- package/elasticsearch/indexing/index.d.ts +2 -0
- package/elasticsearch/indexing/index.js +24 -0
- package/elasticsearch/indexing/longTextIndexing.d.ts +3 -0
- package/elasticsearch/indexing/longTextIndexing.js +36 -0
- package/elasticsearch/indexing/numberIndexing.d.ts +3 -0
- package/elasticsearch/indexing/numberIndexing.js +48 -0
- package/elasticsearch/indexing/objectIndexing.d.ts +3 -0
- package/elasticsearch/indexing/objectIndexing.js +200 -0
- package/elasticsearch/indexing/richTextIndexing.d.ts +3 -0
- package/elasticsearch/indexing/richTextIndexing.js +34 -0
- package/elasticsearch/search/index.d.ts +3 -0
- package/elasticsearch/search/index.js +16 -0
- package/elasticsearch/search/refSearch.d.ts +3 -0
- package/elasticsearch/search/refSearch.js +24 -0
- package/elasticsearch/search/timeSearch.d.ts +3 -0
- package/elasticsearch/search/timeSearch.js +25 -0
- package/helpers/createElasticsearchQueryBody.d.ts +11 -0
- package/helpers/createElasticsearchQueryBody.js +375 -0
- package/helpers/entryIndexHelpers.d.ts +18 -0
- package/helpers/entryIndexHelpers.js +189 -0
- package/helpers/fields.d.ts +77 -0
- package/helpers/fields.js +174 -0
- package/helpers/index.d.ts +2 -0
- package/helpers/index.js +31 -0
- package/helpers/operatorPluginsList.d.ts +7 -0
- package/helpers/operatorPluginsList.js +30 -0
- package/helpers/searchPluginsList.d.ts +6 -0
- package/helpers/searchPluginsList.js +26 -0
- package/helpers/transformValueForSearch.d.ts +9 -0
- package/helpers/transformValueForSearch.js +26 -0
- package/index.d.ts +2 -0
- package/index.js +171 -0
- package/operations/entry/dataLoaders.d.ts +47 -0
- package/operations/entry/dataLoaders.js +347 -0
- package/operations/entry/elasticsearchFields.d.ts +2 -0
- package/operations/entry/elasticsearchFields.js +32 -0
- package/operations/entry/fields.d.ts +3 -0
- package/operations/entry/fields.js +60 -0
- package/operations/entry/index.d.ts +13 -0
- package/operations/entry/index.js +1152 -0
- package/operations/entry/keys.d.ts +12 -0
- package/operations/entry/keys.js +40 -0
- package/operations/group/index.d.ts +8 -0
- package/operations/group/index.js +202 -0
- package/operations/model/index.d.ts +8 -0
- package/operations/model/index.js +205 -0
- package/operations/settings/index.d.ts +6 -0
- package/operations/settings/index.js +141 -0
- package/operations/system/createElasticsearchTemplate.d.ts +5 -0
- package/operations/system/createElasticsearchTemplate.js +62 -0
- package/operations/system/index.d.ts +6 -0
- package/operations/system/index.js +105 -0
- package/package.json +73 -0
- package/plugins/CmsEntryElasticsearchBodyModifierPlugin.d.ts +17 -0
- package/plugins/CmsEntryElasticsearchBodyModifierPlugin.js +24 -0
- package/plugins/CmsEntryElasticsearchFieldPlugin.d.ts +12 -0
- package/plugins/CmsEntryElasticsearchFieldPlugin.js +24 -0
- package/plugins/CmsEntryElasticsearchQueryModifierPlugin.d.ts +17 -0
- package/plugins/CmsEntryElasticsearchQueryModifierPlugin.js +24 -0
- package/plugins/CmsEntryElasticsearchSortModifierPlugin.d.ts +17 -0
- package/plugins/CmsEntryElasticsearchSortModifierPlugin.js +24 -0
- package/types.d.ts +191 -0
- package/types.js +60 -0
- package/upgrades/index.d.ts +2 -0
- package/upgrades/index.js +16 -0
- package/upgrades/utils.d.ts +1 -0
- package/upgrades/utils.js +16 -0
- package/upgrades/v5.0.0/cleanDatabaseRecord.d.ts +6 -0
- package/upgrades/v5.0.0/cleanDatabaseRecord.js +16 -0
- package/upgrades/v5.0.0/createOldVersionIndiceName.d.ts +2 -0
- package/upgrades/v5.0.0/createOldVersionIndiceName.js +12 -0
- package/upgrades/v5.0.0/entryValueFixer.d.ts +4 -0
- package/upgrades/v5.0.0/entryValueFixer.js +124 -0
- package/upgrades/v5.0.0/fieldFinder.d.ts +6 -0
- package/upgrades/v5.0.0/fieldFinder.js +42 -0
- package/upgrades/v5.0.0/helpers.d.ts +4 -0
- package/upgrades/v5.0.0/helpers.js +57 -0
- package/upgrades/v5.0.0/index.d.ts +4 -0
- package/upgrades/v5.0.0/index.js +232 -0
- package/upgrades/v5.8.0/index.d.ts +4 -0
- package/upgrades/v5.8.0/index.js +426 -0
|
@@ -0,0 +1,1152 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
|
|
3
|
+
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
|
|
4
|
+
|
|
5
|
+
Object.defineProperty(exports, "__esModule", {
|
|
6
|
+
value: true
|
|
7
|
+
});
|
|
8
|
+
exports.createPublishedType = exports.createLatestType = exports.createEntriesStorageOperations = void 0;
|
|
9
|
+
|
|
10
|
+
var _defineProperty2 = _interopRequireDefault(require("@babel/runtime/helpers/defineProperty"));
|
|
11
|
+
|
|
12
|
+
var _types = require("@webiny/api-headless-cms/types");
|
|
13
|
+
|
|
14
|
+
var _helpers = require("../../helpers");
|
|
15
|
+
|
|
16
|
+
var _configurations = _interopRequireDefault(require("../../configurations"));
|
|
17
|
+
|
|
18
|
+
var _error = _interopRequireDefault(require("@webiny/error"));
|
|
19
|
+
|
|
20
|
+
var _lodash = _interopRequireDefault(require("lodash.clonedeep"));
|
|
21
|
+
|
|
22
|
+
var _lodash2 = _interopRequireDefault(require("lodash.omit"));
|
|
23
|
+
|
|
24
|
+
var _compression = require("@webiny/api-elasticsearch/compression");
|
|
25
|
+
|
|
26
|
+
var _batchWrite = require("@webiny/db-dynamodb/utils/batchWrite");
|
|
27
|
+
|
|
28
|
+
var _dataLoaders = require("./dataLoaders");
|
|
29
|
+
|
|
30
|
+
var _keys = require("./keys");
|
|
31
|
+
|
|
32
|
+
var _query = require("@webiny/db-dynamodb/utils/query");
|
|
33
|
+
|
|
34
|
+
var _limit = require("@webiny/api-elasticsearch/limit");
|
|
35
|
+
|
|
36
|
+
var _cursors = require("@webiny/api-elasticsearch/cursors");
|
|
37
|
+
|
|
38
|
+
var _get = require("@webiny/db-dynamodb/utils/get");
|
|
39
|
+
|
|
40
|
+
var _utils = require("@webiny/utils");
|
|
41
|
+
|
|
42
|
+
var _cleanup = require("@webiny/db-dynamodb/utils/cleanup");
|
|
43
|
+
|
|
44
|
+
// Babel helper (auto-generated compiler output — do not edit by hand).
// Returns an object's own string keys plus its own symbol keys; when
// `enumerableOnly` is set, only enumerable symbol properties are included.
function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) { symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); } keys.push.apply(keys, symbols); } return keys; }
|
|
45
|
+
|
|
46
|
+
// Babel helper (auto-generated compiler output — do not edit by hand).
// Polyfill for the object spread operator: copies own enumerable properties
// (including symbols) from each source argument onto `target`, later sources
// overriding earlier ones, and returns `target`.
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { (0, _defineProperty2.default)(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; }
|
|
47
|
+
|
|
48
|
+
// Base DynamoDB record type tag shared by every CMS entry record.
const createType = () => "cms.entry";
|
|
51
|
+
|
|
52
|
+
// Type tag marking the "latest revision" helper record of an entry
// (base entry type with an ".l" suffix).
const createLatestType = () => `${createType()}.l`;

exports.createLatestType = createLatestType;

// Type tag marking the "published revision" helper record of an entry
// (base entry type with a ".p" suffix).
const createPublishedType = () => `${createType()}.p`;

exports.createPublishedType = createPublishedType;
|
|
63
|
+
|
|
64
|
+
// Strips DynamoDB bookkeeping attributes (PK/SK plus the published/latest
// flags) from an entry record and stamps it with the base entry type tags
// (both legacy `TYPE` and `__type`).
const getEntryData = entry => {
  const cleaned = (0, _lodash2.default)(entry, ["PK", "SK", "published", "latest"]);
  const type = createType();
  return _objectSpread(_objectSpread({}, cleaned), {}, {
    TYPE: type,
    __type: type
  });
};
|
|
70
|
+
|
|
71
|
+
// Builds the compressed payload stored in the Elasticsearch DynamoDB table
// for the "latest revision" record of an entry. The payload is the cleaned
// entry data marked with `latest: true` and the latest-type tags.
const getESLatestEntryData = async (plugins, entry) => {
  const latestType = createLatestType();
  const payload = _objectSpread(_objectSpread({}, getEntryData(entry)), {}, {
    latest: true,
    TYPE: latestType,
    __type: latestType
  });
  return (0, _compression.compress)(plugins, payload);
};
|
|
78
|
+
|
|
79
|
+
// Builds the compressed payload stored in the Elasticsearch DynamoDB table
// for the "published revision" record of an entry. The payload is the cleaned
// entry data marked with `published: true` and the published-type tags.
const getESPublishedEntryData = async (plugins, entry) => {
  const publishedType = createPublishedType();
  const payload = _objectSpread(_objectSpread({}, getEntryData(entry)), {}, {
    published: true,
    TYPE: publishedType,
    __type: publishedType
  });
  return (0, _compression.compress)(plugins, payload);
};
|
|
86
|
+
|
|
87
|
+
const createEntriesStorageOperations = params => {
|
|
88
|
+
const {
|
|
89
|
+
entity,
|
|
90
|
+
esEntity,
|
|
91
|
+
elasticsearch,
|
|
92
|
+
plugins
|
|
93
|
+
} = params;
|
|
94
|
+
const dataLoaders = new _dataLoaders.DataLoadersHandler({
|
|
95
|
+
entity
|
|
96
|
+
});
|
|
97
|
+
|
|
98
|
+
/**
 * Stores a brand-new entry: writes the revision record and the "latest"
 * helper record to the regular DynamoDB table, then mirrors the "latest"
 * record into the Elasticsearch DynamoDB table.
 *
 * @param model CMS model the entry belongs to.
 * @param params `{ entry, storageEntry }` — original and storage-transformed entry.
 * @returns The unchanged `storageEntry`.
 * @throws WebinyError with code CREATE_ENTRY_ERROR / CREATE_ES_ENTRY_ERROR.
 */
const create = async (model, params) => {
  const {
    entry,
    storageEntry
  } = params;

  const revisionKeys = {
    PK: (0, _keys.createPartitionKey)(entry),
    SK: (0, _keys.createRevisionSortKey)(entry)
  };
  const latestKeys = {
    PK: (0, _keys.createPartitionKey)(entry),
    SK: (0, _keys.createLatestSortKey)()
  };

  // Prepare the search payload from deep clones so the indexing transforms
  // never mutate the caller's objects.
  const esEntry = (0, _helpers.prepareEntryToIndex)({
    plugins,
    model,
    entry: (0, _lodash.default)(entry),
    storageEntry: (0, _lodash.default)(storageEntry)
  });
  const esLatestData = await getESLatestEntryData(plugins, esEntry);

  const {
    index: esIndex
  } = _configurations.default.es({
    model
  });

  // Two records in the regular table: the revision itself and the "latest" marker.
  const items = [entity.putBatch(_objectSpread(_objectSpread(_objectSpread({}, storageEntry), revisionKeys), {}, {
    TYPE: createType()
  })), entity.putBatch(_objectSpread(_objectSpread(_objectSpread({}, storageEntry), latestKeys), {}, {
    TYPE: createLatestType()
  }))];

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items
    });
    // Cached loader results are stale after any write.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not insert entry data into the DynamoDB table.", ex.code || "CREATE_ENTRY_ERROR", {
      error: ex,
      entry,
      storageEntry
    });
  }

  try {
    await esEntity.put(_objectSpread(_objectSpread({}, latestKeys), {}, {
      index: esIndex,
      data: esLatestData
    }));
  } catch (ex) {
    throw new _error.default(ex.message || "Could not insert entry data into the Elasticsearch DynamoDB table.", ex.code || "CREATE_ES_ENTRY_ERROR", {
      error: ex,
      entry,
      esEntry
    });
  }

  return storageEntry;
};
|
|
162
|
+
|
|
163
|
+
const createRevisionFrom = async (model, params) => {
|
|
164
|
+
const {
|
|
165
|
+
originalEntry,
|
|
166
|
+
entry,
|
|
167
|
+
storageEntry
|
|
168
|
+
} = params;
|
|
169
|
+
const revisionKeys = {
|
|
170
|
+
PK: (0, _keys.createPartitionKey)(entry),
|
|
171
|
+
SK: (0, _keys.createRevisionSortKey)(entry)
|
|
172
|
+
};
|
|
173
|
+
const latestKeys = {
|
|
174
|
+
PK: (0, _keys.createPartitionKey)(entry),
|
|
175
|
+
SK: (0, _keys.createLatestSortKey)()
|
|
176
|
+
};
|
|
177
|
+
const esEntry = (0, _helpers.prepareEntryToIndex)({
|
|
178
|
+
plugins,
|
|
179
|
+
model,
|
|
180
|
+
entry: (0, _lodash.default)(entry),
|
|
181
|
+
storageEntry: (0, _lodash.default)(storageEntry)
|
|
182
|
+
});
|
|
183
|
+
const esLatestData = await getESLatestEntryData(plugins, esEntry);
|
|
184
|
+
const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
|
|
185
|
+
TYPE: createType()
|
|
186
|
+
}, revisionKeys)), entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
|
|
187
|
+
TYPE: createLatestType()
|
|
188
|
+
}, latestKeys))];
|
|
189
|
+
|
|
190
|
+
const {
|
|
191
|
+
index
|
|
192
|
+
} = _configurations.default.es({
|
|
193
|
+
model
|
|
194
|
+
});
|
|
195
|
+
|
|
196
|
+
try {
|
|
197
|
+
await (0, _batchWrite.batchWriteAll)({
|
|
198
|
+
table: entity.table,
|
|
199
|
+
items
|
|
200
|
+
});
|
|
201
|
+
dataLoaders.clearAll({
|
|
202
|
+
model
|
|
203
|
+
});
|
|
204
|
+
} catch (ex) {
|
|
205
|
+
throw new _error.default(ex.message || "Could not create revision from given entry in the DynamoDB table.", ex.code || "CREATE_REVISION_ERROR", {
|
|
206
|
+
error: ex,
|
|
207
|
+
originalEntry,
|
|
208
|
+
entry,
|
|
209
|
+
storageEntry
|
|
210
|
+
});
|
|
211
|
+
}
|
|
212
|
+
/**
|
|
213
|
+
* Update the "latest" entry item in the Elasticsearch
|
|
214
|
+
*/
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
try {
|
|
218
|
+
await esEntity.put(_objectSpread(_objectSpread({}, latestKeys), {}, {
|
|
219
|
+
index,
|
|
220
|
+
data: esLatestData
|
|
221
|
+
}));
|
|
222
|
+
} catch (ex) {
|
|
223
|
+
throw new _error.default(ex.message || "Could not update latest entry in the DynamoDB Elasticsearch table.", ex.code || "CREATE_REVISION_ERROR", {
|
|
224
|
+
error: ex,
|
|
225
|
+
originalEntry,
|
|
226
|
+
entry
|
|
227
|
+
});
|
|
228
|
+
}
|
|
229
|
+
/**
|
|
230
|
+
* There are no modifications on the entry created so just return the data.
|
|
231
|
+
*/
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
return storageEntry;
|
|
235
|
+
};
|
|
236
|
+
|
|
237
|
+
/**
 * Updates an existing entry revision. Always rewrites the revision record;
 * when the revision being updated is also the latest one, additionally
 * rewrites the "latest" helper record and its Elasticsearch mirror.
 *
 * Fixes:
 *  - the "latest" record's TYPE was set via `createLatestSortKey()` (a sort
 *    key, e.g. "L") instead of `createLatestType()` ("cms.entry.l"), which
 *    every sibling operation uses for that record;
 *  - guard against `getLatestRevisionByEntryId` returning no record, matching
 *    the `latestStorageEntry && …` check used by `publish`.
 *
 * @param model CMS model the entry belongs to.
 * @param params `{ originalEntry, entry, storageEntry }`.
 * @returns The unchanged `storageEntry`.
 * @throws WebinyError with code UPDATE_ENTRY_ERROR / UPDATE_ES_ENTRY_ERROR.
 */
const update = async (model, params) => {
  const {
    originalEntry,
    entry,
    storageEntry
  } = params;
  const revisionKeys = {
    PK: (0, _keys.createPartitionKey)(entry),
    SK: (0, _keys.createRevisionSortKey)(entry)
  };
  const latestKeys = {
    PK: (0, _keys.createPartitionKey)(entry),
    SK: (0, _keys.createLatestSortKey)()
  };
  /**
   * We need the latest entry to check if it needs to be updated.
   */
  const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
    model,
    ids: [originalEntry.id]
  });
  const items = [entity.putBatch(_objectSpread(_objectSpread(_objectSpread({}, storageEntry), revisionKeys), {}, {
    TYPE: createType()
  }))];
  /**
   * If the latest entry is the one being updated, we need to create new latest entry records.
   */
  let elasticsearchLatestData = null;

  if (latestStorageEntry && latestStorageEntry.id === originalEntry.id) {
    /**
     * First we update the regular DynamoDB table.
     * Note: TYPE must be the latest *type* tag, not the latest sort key.
     */
    items.push(entity.putBatch(_objectSpread(_objectSpread(_objectSpread({}, storageEntry), latestKeys), {}, {
      TYPE: createLatestType()
    })));
    /**
     * And then prepare data for the Elasticsearch table to propagate changes.
     */
    const esEntry = (0, _helpers.prepareEntryToIndex)({
      plugins,
      model,
      entry: (0, _lodash.default)(entry),
      storageEntry: (0, _lodash.default)(storageEntry)
    });
    elasticsearchLatestData = await getESLatestEntryData(plugins, esEntry);
  }

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items
    });
    // Cached loader results are stale after any write.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not update entry DynamoDB records.", ex.code || "UPDATE_ENTRY_ERROR", {
      error: ex,
      originalEntry,
      entry,
      storageEntry
    });
  }

  // Nothing to mirror into Elasticsearch when the updated revision is not the latest.
  if (!elasticsearchLatestData) {
    return storageEntry;
  }

  const {
    index: esIndex
  } = _configurations.default.es({
    model
  });

  try {
    await esEntity.put(_objectSpread(_objectSpread({}, latestKeys), {}, {
      index: esIndex,
      data: elasticsearchLatestData
    }));
  } catch (ex) {
    throw new _error.default(ex.message || "Could not update entry DynamoDB Elasticsearch record.", ex.code || "UPDATE_ES_ENTRY_ERROR", {
      error: ex,
      originalEntry,
      entry
    });
  }

  return storageEntry;
};
|
|
330
|
+
|
|
331
|
+
/**
 * Deletes every record of an entry (all revisions plus the latest/published
 * helper records) from both the regular DynamoDB table and the Elasticsearch
 * DynamoDB table.
 *
 * Improvement: the two partition scans are independent of each other, so they
 * now run in parallel via Promise.all instead of sequentially.
 *
 * @param model CMS model the entry belongs to.
 * @param params `{ entry }` — the entry whose records are removed.
 * @throws WebinyError with code DELETE_ENTRY_ERROR.
 */
const deleteEntry = async (model, params) => {
  const {
    entry
  } = params;
  const partitionKey = (0, _keys.createPartitionKey)(entry);
  // `gte: " "` selects every sort key in the partition.
  const [items, esItems] = await Promise.all([(0, _query.queryAll)({
    entity,
    partitionKey,
    options: {
      gte: " "
    }
  }), (0, _query.queryAll)({
    entity: esEntity,
    partitionKey,
    options: {
      gte: " "
    }
  })]);
  const deleteItems = items.map(item => {
    return entity.deleteBatch({
      PK: item.PK,
      SK: item.SK
    });
  });
  const deleteEsItems = esItems.map(item => {
    return esEntity.deleteBatch({
      PK: item.PK,
      SK: item.SK
    });
  });

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items: deleteItems
    });
    // Cached loader results are stale after any write.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not delete entry records from DynamoDB table.", ex.code || "DELETE_ENTRY_ERROR", {
      error: ex,
      entry
    });
  }

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: esEntity.table,
      items: deleteEsItems
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not delete entry records from DynamoDB Elasticsearch table.", ex.code || "DELETE_ENTRY_ERROR", {
      error: ex,
      entry
    });
  }
};
|
|
390
|
+
|
|
391
|
+
const deleteRevision = async (model, params) => {
|
|
392
|
+
const {
|
|
393
|
+
entryToDelete,
|
|
394
|
+
entryToSetAsLatest,
|
|
395
|
+
storageEntryToSetAsLatest
|
|
396
|
+
} = params;
|
|
397
|
+
const partitionKey = (0, _keys.createPartitionKey)(entryToDelete);
|
|
398
|
+
|
|
399
|
+
const {
|
|
400
|
+
index
|
|
401
|
+
} = _configurations.default.es({
|
|
402
|
+
model
|
|
403
|
+
});
|
|
404
|
+
/**
|
|
405
|
+
* We need published entry to delete it if necessary.
|
|
406
|
+
*/
|
|
407
|
+
|
|
408
|
+
|
|
409
|
+
const [publishedStorageEntry] = await dataLoaders.getPublishedRevisionByEntryId({
|
|
410
|
+
model,
|
|
411
|
+
ids: [entryToDelete.id]
|
|
412
|
+
});
|
|
413
|
+
/**
|
|
414
|
+
* We need to delete all existing records of the given entry revision.
|
|
415
|
+
*/
|
|
416
|
+
|
|
417
|
+
const items = [
|
|
418
|
+
/**
|
|
419
|
+
* Delete records of given entry revision.
|
|
420
|
+
*/
|
|
421
|
+
entity.deleteBatch({
|
|
422
|
+
PK: partitionKey,
|
|
423
|
+
SK: (0, _keys.createRevisionSortKey)(entryToDelete)
|
|
424
|
+
})];
|
|
425
|
+
const esItems = [];
|
|
426
|
+
/**
|
|
427
|
+
* If revision we are deleting is the published one as well, we need to delete those records as well.
|
|
428
|
+
*/
|
|
429
|
+
|
|
430
|
+
if (publishedStorageEntry && entryToDelete.id === publishedStorageEntry.id) {
|
|
431
|
+
items.push(entity.deleteBatch({
|
|
432
|
+
PK: partitionKey,
|
|
433
|
+
SK: (0, _keys.createPublishedSortKey)()
|
|
434
|
+
}));
|
|
435
|
+
esItems.push(entity.deleteBatch({
|
|
436
|
+
PK: partitionKey,
|
|
437
|
+
SK: (0, _keys.createPublishedSortKey)()
|
|
438
|
+
}));
|
|
439
|
+
}
|
|
440
|
+
|
|
441
|
+
if (entryToSetAsLatest) {
|
|
442
|
+
const esEntry = (0, _helpers.prepareEntryToIndex)({
|
|
443
|
+
plugins,
|
|
444
|
+
model,
|
|
445
|
+
entry: (0, _lodash.default)(entryToSetAsLatest),
|
|
446
|
+
storageEntry: (0, _lodash.default)(storageEntryToSetAsLatest)
|
|
447
|
+
});
|
|
448
|
+
const esLatestData = await getESLatestEntryData(plugins, esEntry);
|
|
449
|
+
/**
|
|
450
|
+
* In the end we need to set the new latest entry
|
|
451
|
+
*/
|
|
452
|
+
|
|
453
|
+
items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntryToSetAsLatest), {}, {
|
|
454
|
+
PK: partitionKey,
|
|
455
|
+
SK: (0, _keys.createLatestSortKey)(),
|
|
456
|
+
TYPE: createLatestType()
|
|
457
|
+
})));
|
|
458
|
+
esItems.push(esEntity.putBatch({
|
|
459
|
+
PK: partitionKey,
|
|
460
|
+
SK: (0, _keys.createLatestSortKey)(),
|
|
461
|
+
index,
|
|
462
|
+
data: esLatestData
|
|
463
|
+
}));
|
|
464
|
+
}
|
|
465
|
+
|
|
466
|
+
try {
|
|
467
|
+
await (0, _batchWrite.batchWriteAll)({
|
|
468
|
+
table: entity.table,
|
|
469
|
+
items
|
|
470
|
+
});
|
|
471
|
+
dataLoaders.clearAll({
|
|
472
|
+
model
|
|
473
|
+
});
|
|
474
|
+
} catch (ex) {
|
|
475
|
+
throw new _error.default(ex.message || "Could not batch write entry records to DynamoDB table.", ex.code || "DELETE_REVISION_ERROR", {
|
|
476
|
+
error: ex,
|
|
477
|
+
entryToDelete,
|
|
478
|
+
entryToSetAsLatest,
|
|
479
|
+
storageEntryToSetAsLatest
|
|
480
|
+
});
|
|
481
|
+
}
|
|
482
|
+
|
|
483
|
+
if (esItems.length === 0) {
|
|
484
|
+
return;
|
|
485
|
+
}
|
|
486
|
+
|
|
487
|
+
try {
|
|
488
|
+
await (0, _batchWrite.batchWriteAll)({
|
|
489
|
+
table: esEntity.table,
|
|
490
|
+
items: esItems
|
|
491
|
+
});
|
|
492
|
+
} catch (ex) {
|
|
493
|
+
throw new _error.default(ex.message || "Could not batch write entry records to DynamoDB Elasticsearch table.", ex.code || "DELETE_REVISION_ERROR", {
|
|
494
|
+
error: ex,
|
|
495
|
+
entryToDelete,
|
|
496
|
+
entryToSetAsLatest,
|
|
497
|
+
storageEntryToSetAsLatest
|
|
498
|
+
});
|
|
499
|
+
}
|
|
500
|
+
};
|
|
501
|
+
|
|
502
|
+
const list = async (model, params) => {
|
|
503
|
+
const limit = (0, _limit.createLimit)(params.limit, 50);
|
|
504
|
+
const body = (0, _helpers.createElasticsearchQueryBody)({
|
|
505
|
+
model,
|
|
506
|
+
args: _objectSpread(_objectSpread({}, params), {}, {
|
|
507
|
+
limit
|
|
508
|
+
}),
|
|
509
|
+
plugins,
|
|
510
|
+
parentPath: "values"
|
|
511
|
+
});
|
|
512
|
+
let response;
|
|
513
|
+
|
|
514
|
+
const {
|
|
515
|
+
index
|
|
516
|
+
} = _configurations.default.es({
|
|
517
|
+
model
|
|
518
|
+
});
|
|
519
|
+
|
|
520
|
+
try {
|
|
521
|
+
response = await elasticsearch.search({
|
|
522
|
+
index,
|
|
523
|
+
body
|
|
524
|
+
});
|
|
525
|
+
} catch (ex) {
|
|
526
|
+
throw new _error.default(ex.message, ex.code || "ELASTICSEARCH_ERROR", {
|
|
527
|
+
error: ex,
|
|
528
|
+
index,
|
|
529
|
+
body
|
|
530
|
+
});
|
|
531
|
+
}
|
|
532
|
+
|
|
533
|
+
const {
|
|
534
|
+
hits,
|
|
535
|
+
total
|
|
536
|
+
} = response.body.hits;
|
|
537
|
+
const items = (0, _helpers.extractEntriesFromIndex)({
|
|
538
|
+
plugins,
|
|
539
|
+
model,
|
|
540
|
+
entries: hits.map(item => item._source)
|
|
541
|
+
});
|
|
542
|
+
const hasMoreItems = items.length > limit;
|
|
543
|
+
|
|
544
|
+
if (hasMoreItems) {
|
|
545
|
+
/**
|
|
546
|
+
* Remove the last item from results, we don't want to include it.
|
|
547
|
+
*/
|
|
548
|
+
items.pop();
|
|
549
|
+
}
|
|
550
|
+
/**
|
|
551
|
+
* Cursor is the `sort` value of the last item in the array.
|
|
552
|
+
* https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#search-after
|
|
553
|
+
*/
|
|
554
|
+
|
|
555
|
+
|
|
556
|
+
const cursor = items.length > 0 ? (0, _cursors.encodeCursor)(hits[items.length - 1].sort) : null;
|
|
557
|
+
return {
|
|
558
|
+
hasMoreItems,
|
|
559
|
+
totalCount: total.value,
|
|
560
|
+
cursor,
|
|
561
|
+
items
|
|
562
|
+
};
|
|
563
|
+
};
|
|
564
|
+
|
|
565
|
+
/**
 * Fetches a single entry matching the given params, or null when nothing
 * matches. Implemented as a list() query capped at one item.
 */
const get = async (model, params) => {
  const listParams = _objectSpread(_objectSpread({}, params), {}, {
    limit: 1
  });
  const {
    items
  } = await list(model, listParams);
  return items.length > 0 ? items[0] : null;
};
|
|
578
|
+
|
|
579
|
+
const publish = async (model, params) => {
|
|
580
|
+
const {
|
|
581
|
+
entry,
|
|
582
|
+
storageEntry
|
|
583
|
+
} = params;
|
|
584
|
+
/**
|
|
585
|
+
* We need currently published entry to check if need to remove it.
|
|
586
|
+
*/
|
|
587
|
+
|
|
588
|
+
const [publishedStorageEntry] = await dataLoaders.getPublishedRevisionByEntryId({
|
|
589
|
+
model,
|
|
590
|
+
ids: [entry.id]
|
|
591
|
+
});
|
|
592
|
+
const revisionKeys = {
|
|
593
|
+
PK: (0, _keys.createPartitionKey)(entry),
|
|
594
|
+
SK: (0, _keys.createRevisionSortKey)(entry)
|
|
595
|
+
};
|
|
596
|
+
const latestKeys = {
|
|
597
|
+
PK: (0, _keys.createPartitionKey)(entry),
|
|
598
|
+
SK: (0, _keys.createLatestSortKey)()
|
|
599
|
+
};
|
|
600
|
+
const publishedKeys = {
|
|
601
|
+
PK: (0, _keys.createPartitionKey)(entry),
|
|
602
|
+
SK: (0, _keys.createPublishedSortKey)()
|
|
603
|
+
};
|
|
604
|
+
let latestEsEntry = null;
|
|
605
|
+
|
|
606
|
+
try {
|
|
607
|
+
latestEsEntry = await (0, _get.get)({
|
|
608
|
+
entity: esEntity,
|
|
609
|
+
keys: latestKeys
|
|
610
|
+
});
|
|
611
|
+
} catch (ex) {
|
|
612
|
+
throw new _error.default(ex.message || "Could not read Elasticsearch latest or published data.", ex.code || "PUBLISH_BATCH_READ", {
|
|
613
|
+
error: ex,
|
|
614
|
+
latestKeys: latestKeys,
|
|
615
|
+
publishedKeys: publishedKeys
|
|
616
|
+
});
|
|
617
|
+
}
|
|
618
|
+
|
|
619
|
+
const items = [entity.putBatch(_objectSpread(_objectSpread(_objectSpread({}, storageEntry), revisionKeys), {}, {
|
|
620
|
+
TYPE: createType()
|
|
621
|
+
}))];
|
|
622
|
+
const esItems = [];
|
|
623
|
+
|
|
624
|
+
const {
|
|
625
|
+
index
|
|
626
|
+
} = _configurations.default.es({
|
|
627
|
+
model
|
|
628
|
+
});
|
|
629
|
+
|
|
630
|
+
if (publishedStorageEntry) {
|
|
631
|
+
/**
|
|
632
|
+
* If there is a `published` entry already, we need to set it to `unpublished`. We need to
|
|
633
|
+
* execute two updates: update the previously published entry's status and the published entry record.
|
|
634
|
+
* DynamoDB does not support `batchUpdate` - so here we load the previously published
|
|
635
|
+
* entry's data to update its status within a batch operation. If, hopefully,
|
|
636
|
+
* they introduce a true update batch operation, remove this `read` call.
|
|
637
|
+
*/
|
|
638
|
+
const [previouslyPublishedEntry] = await dataLoaders.getRevisionById({
|
|
639
|
+
model,
|
|
640
|
+
ids: [publishedStorageEntry.id]
|
|
641
|
+
});
|
|
642
|
+
items.push(
|
|
643
|
+
/**
|
|
644
|
+
* Update currently published entry (unpublish it)
|
|
645
|
+
*/
|
|
646
|
+
entity.putBatch(_objectSpread(_objectSpread({}, previouslyPublishedEntry), {}, {
|
|
647
|
+
status: _types.CONTENT_ENTRY_STATUS.UNPUBLISHED,
|
|
648
|
+
savedOn: entry.savedOn,
|
|
649
|
+
TYPE: createType(),
|
|
650
|
+
PK: (0, _keys.createPartitionKey)(publishedStorageEntry),
|
|
651
|
+
SK: (0, _keys.createRevisionSortKey)(publishedStorageEntry)
|
|
652
|
+
})));
|
|
653
|
+
}
|
|
654
|
+
/**
|
|
655
|
+
* Update the helper item in DB with the new published entry
|
|
656
|
+
*/
|
|
657
|
+
|
|
658
|
+
|
|
659
|
+
items.push(entity.putBatch(_objectSpread(_objectSpread(_objectSpread({}, storageEntry), publishedKeys), {}, {
|
|
660
|
+
TYPE: createPublishedType()
|
|
661
|
+
})));
|
|
662
|
+
/**
|
|
663
|
+
* We need the latest entry to check if it neds to be updated as well in the Elasticsearch.
|
|
664
|
+
*/
|
|
665
|
+
|
|
666
|
+
const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
|
|
667
|
+
model,
|
|
668
|
+
ids: [entry.id]
|
|
669
|
+
});
|
|
670
|
+
/**
|
|
671
|
+
* If we are publishing the latest revision, let's also update the latest revision's status in ES.
|
|
672
|
+
*/
|
|
673
|
+
|
|
674
|
+
if (latestStorageEntry && latestStorageEntry.id === entry.id) {
|
|
675
|
+
/**
|
|
676
|
+
* Need to decompress the data from Elasticsearch DynamoDB table.
|
|
677
|
+
*/
|
|
678
|
+
const latestEsEntryDataDecompressed = await (0, _compression.decompress)(plugins, latestEsEntry.data);
|
|
679
|
+
esItems.push(esEntity.putBatch({
|
|
680
|
+
index,
|
|
681
|
+
PK: (0, _keys.createPartitionKey)(latestEsEntryDataDecompressed),
|
|
682
|
+
SK: (0, _keys.createLatestSortKey)(),
|
|
683
|
+
data: _objectSpread(_objectSpread({}, latestEsEntryDataDecompressed), {}, {
|
|
684
|
+
status: _types.CONTENT_ENTRY_STATUS.PUBLISHED,
|
|
685
|
+
locked: true,
|
|
686
|
+
savedOn: entry.savedOn,
|
|
687
|
+
publishedOn: entry.publishedOn
|
|
688
|
+
})
|
|
689
|
+
}));
|
|
690
|
+
}
|
|
691
|
+
|
|
692
|
+
const preparedEntryData = (0, _helpers.prepareEntryToIndex)({
|
|
693
|
+
plugins,
|
|
694
|
+
model,
|
|
695
|
+
entry: (0, _lodash.default)(entry),
|
|
696
|
+
storageEntry: (0, _lodash.default)(storageEntry)
|
|
697
|
+
});
|
|
698
|
+
/**
|
|
699
|
+
* Update the published revision entry in ES.
|
|
700
|
+
*/
|
|
701
|
+
|
|
702
|
+
const esLatestData = await getESPublishedEntryData(plugins, preparedEntryData);
|
|
703
|
+
esItems.push(esEntity.putBatch(_objectSpread(_objectSpread({}, publishedKeys), {}, {
|
|
704
|
+
index,
|
|
705
|
+
data: esLatestData
|
|
706
|
+
})));
|
|
707
|
+
/**
|
|
708
|
+
* Finally, execute regular table batch.
|
|
709
|
+
*/
|
|
710
|
+
|
|
711
|
+
try {
|
|
712
|
+
await (0, _batchWrite.batchWriteAll)({
|
|
713
|
+
table: entity.table,
|
|
714
|
+
items
|
|
715
|
+
});
|
|
716
|
+
dataLoaders.clearAll({
|
|
717
|
+
model
|
|
718
|
+
});
|
|
719
|
+
} catch (ex) {
|
|
720
|
+
throw new _error.default(ex.message || "Could not store publish entry records in DynamoDB table.", ex.code || "PUBLISH_ERROR", {
|
|
721
|
+
error: ex,
|
|
722
|
+
entry,
|
|
723
|
+
latestStorageEntry,
|
|
724
|
+
publishedStorageEntry
|
|
725
|
+
});
|
|
726
|
+
}
|
|
727
|
+
/**
|
|
728
|
+
* And Elasticsearch table batch.
|
|
729
|
+
*/
|
|
730
|
+
|
|
731
|
+
|
|
732
|
+
try {
|
|
733
|
+
await (0, _batchWrite.batchWriteAll)({
|
|
734
|
+
table: esEntity.table,
|
|
735
|
+
items: esItems
|
|
736
|
+
});
|
|
737
|
+
} catch (ex) {
|
|
738
|
+
throw new _error.default(ex.message || "Could not store publish entry records in DynamoDB Elasticsearch table.", ex.code || "PUBLISH_ES_ERROR", {
|
|
739
|
+
error: ex,
|
|
740
|
+
entry,
|
|
741
|
+
latestStorageEntry,
|
|
742
|
+
publishedStorageEntry
|
|
743
|
+
});
|
|
744
|
+
}
|
|
745
|
+
|
|
746
|
+
return storageEntry;
|
|
747
|
+
};
|
|
748
|
+
|
|
749
|
+
/**
 * Unpublish a previously published revision.
 *
 * Deletes the "published" record from both the regular and the Elasticsearch
 * DynamoDB tables, rewrites the revision record with the updated (unpublished)
 * entry data and, when the unpublished revision is also the latest one,
 * refreshes the latest-record payload in the Elasticsearch table.
 *
 * @param {Object} model - Content model (provides tenant/locale context via key helpers).
 * @param {Object} params - `{ entry, storageEntry }` where `storageEntry` is the
 *                          storage-transformed version of `entry`.
 * @returns {Promise<Object>} The stored `storageEntry`.
 * @throws {WebinyError} UNPUBLISH_ERROR / UNPUBLISH_ES_ERROR on write failures.
 */
const unpublish = async (model, params) => {
  const {
    entry,
    storageEntry
  } = params;
  /**
   * We need the latest entry to check if it needs to be updated.
   */

  const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
    model,
    ids: [entry.id]
  });
  const partitionKey = (0, _keys.createPartitionKey)(entry);
  // Regular table: drop the published marker record and rewrite the revision record.
  const items = [entity.deleteBatch({
    PK: partitionKey,
    SK: (0, _keys.createPublishedSortKey)()
  }), entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
    PK: partitionKey,
    SK: (0, _keys.createRevisionSortKey)(entry),
    TYPE: createType()
  }))];
  // Elasticsearch table: drop the published record as well.
  const esItems = [esEntity.deleteBatch({
    PK: partitionKey,
    SK: (0, _keys.createPublishedSortKey)()
  })];
  /**
   * If we are unpublishing the latest revision, let's also update the latest revision entry's status in ES.
   * Guard against a missing latest revision (null check added for parity with
   * requestReview/requestChanges, which perform the same comparison safely).
   */

  if (latestStorageEntry && latestStorageEntry.id === entry.id) {
    const {
      index
    } = _configurations.default.es({
      model
    });

    const preparedEntryData = (0, _helpers.prepareEntryToIndex)({
      plugins,
      model,
      entry: (0, _lodash.default)(entry),
      storageEntry: (0, _lodash.default)(storageEntry)
    });
    const esLatestData = await getESLatestEntryData(plugins, preparedEntryData);
    esItems.push(esEntity.putBatch({
      PK: partitionKey,
      SK: (0, _keys.createLatestSortKey)(),
      index,
      data: esLatestData
    }));
  }
  /**
   * Finally, execute regular table batch.
   */

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items
    });
    // Loader caches may now hold stale published/latest records.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not store unpublished entry records in DynamoDB table.", ex.code || "UNPUBLISH_ERROR", {
      error: ex,
      entry,
      storageEntry
    });
  }
  /**
   * And Elasticsearch table batch.
   */

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: esEntity.table,
      items: esItems
    });
  } catch (ex) {
    // Distinct code for the Elasticsearch-table failure, mirroring publish()'s
    // PUBLISH_ES_ERROR; the original reused UNPUBLISH_ERROR here (copy/paste slip).
    throw new _error.default(ex.message || "Could not store unpublished entry records in DynamoDB Elasticsearch table.", ex.code || "UNPUBLISH_ES_ERROR", {
      error: ex,
      entry,
      storageEntry
    });
  }

  return storageEntry;
};
|
|
838
|
+
|
|
839
|
+
/**
 * Mark an entry revision as "review requested".
 *
 * Rewrites the revision record in the regular DynamoDB table and, when the
 * affected revision is the latest one, refreshes the latest record in the
 * Elasticsearch DynamoDB table so the status change is searchable.
 *
 * @param {Object} model - Content model (tenant/locale context for key helpers).
 * @param {Object} params - `{ entry, storageEntry, originalEntry }`.
 * @returns {Promise<Object>} The stored `storageEntry`.
 * @throws {WebinyError} REQUEST_REVIEW_ERROR on write failures.
 */
const requestReview = async (model, params) => {
  const {
    entry,
    storageEntry,
    originalEntry
  } = params;
  /**
   * We need the latest entry to check if it needs to be updated.
   */

  const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
    model,
    ids: [entry.id]
  });
  const partitionKey = (0, _keys.createPartitionKey)(entry);
  /**
   * If we updated the latest version, then make sure the changes are propagated to ES too.
   */

  let esLatestData = null;

  const {
    index
  } = _configurations.default.es({
    model
  });

  if (latestStorageEntry && latestStorageEntry.id === entry.id) {
    const preparedEntryData = (0, _helpers.prepareEntryToIndex)({
      plugins,
      model,
      entry: (0, _lodash.default)(entry),
      storageEntry: (0, _lodash.default)(storageEntry)
    });
    esLatestData = await getESLatestEntryData(plugins, preparedEntryData);
  }

  try {
    // Rewrite the revision record with the new (review-requested) status.
    await entity.put(_objectSpread(_objectSpread({}, storageEntry), {}, {
      PK: partitionKey,
      SK: (0, _keys.createRevisionSortKey)(entry),
      TYPE: createType()
    }));
    // Loader caches may now hold a stale copy of this revision.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    // Include the original exception in the error data, matching publish()'s pattern.
    throw new _error.default(ex.message || "Could not store request review entry record into DynamoDB table.", ex.code || "REQUEST_REVIEW_ERROR", {
      error: ex,
      entry,
      latestStorageEntry,
      originalEntry
    });
  }
  /**
   * No need to proceed further if nothing to put into Elasticsearch.
   */


  if (!esLatestData) {
    return storageEntry;
  }

  try {
    await esEntity.put({
      PK: partitionKey,
      SK: (0, _keys.createLatestSortKey)(),
      index,
      data: esLatestData
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not store request review entry record into DynamoDB Elasticsearch table.", ex.code || "REQUEST_REVIEW_ERROR", {
      error: ex,
      entry,
      latestStorageEntry,
      originalEntry
    });
  }

  return storageEntry;
};
|
|
918
|
+
|
|
919
|
+
/**
 * Mark an entry revision as "changes requested".
 *
 * Rewrites the revision record (and, when it is the latest revision, the latest
 * record) in the regular DynamoDB table, then refreshes the latest record in the
 * Elasticsearch DynamoDB table so the status change is searchable.
 *
 * @param {Object} model - Content model (tenant/locale context for key helpers).
 * @param {Object} params - `{ entry, storageEntry, originalEntry }`.
 * @returns {Promise<Object>} The stored `storageEntry`.
 * @throws {WebinyError} REQUEST_CHANGES_ERROR on write failures.
 */
const requestChanges = async (model, params) => {
  const {
    entry,
    storageEntry,
    originalEntry
  } = params;
  /**
   * We need the latest entry to check if it needs to be updated.
   */

  const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
    model,
    ids: [entry.id]
  });
  const partitionKey = (0, _keys.createPartitionKey)(entry);
  const items = [entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
    PK: partitionKey,
    SK: (0, _keys.createRevisionSortKey)(entry),
    TYPE: createType()
  }))];
  /**
   * If we updated the latest version, then make sure the changes are propagated to ES too.
   */

  const {
    index
  } = _configurations.default.es({
    model
  });

  let esLatestData = null;

  if (latestStorageEntry && latestStorageEntry.id === entry.id) {
    // The latest record in the regular table must reflect the status change too.
    items.push(entity.putBatch(_objectSpread(_objectSpread({}, storageEntry), {}, {
      PK: partitionKey,
      SK: (0, _keys.createLatestSortKey)(),
      TYPE: createLatestType()
    })));
    const preparedEntryData = (0, _helpers.prepareEntryToIndex)({
      plugins,
      model,
      entry: (0, _lodash.default)(entry),
      storageEntry: (0, _lodash.default)(storageEntry)
    });
    esLatestData = await getESLatestEntryData(plugins, preparedEntryData);
  }

  try {
    await (0, _batchWrite.batchWriteAll)({
      table: entity.table,
      items
    });
    // Loader caches may now hold stale revision/latest records.
    dataLoaders.clearAll({
      model
    });
  } catch (ex) {
    // Include the original exception in the error data, matching publish()'s pattern.
    throw new _error.default(ex.message || "Could not store request changes entry record into DynamoDB table.", ex.code || "REQUEST_CHANGES_ERROR", {
      error: ex,
      entry,
      latestStorageEntry,
      originalEntry
    });
  }
  /**
   * No need to proceed further if nothing to put into Elasticsearch.
   */


  if (!esLatestData) {
    return storageEntry;
  }

  try {
    await esEntity.put({
      PK: partitionKey,
      SK: (0, _keys.createLatestSortKey)(),
      index,
      data: esLatestData
    });
  } catch (ex) {
    throw new _error.default(ex.message || "Could not store request changes entry record into DynamoDB Elasticsearch table.", ex.code || "REQUEST_CHANGES_ERROR", {
      error: ex,
      entry,
      latestStorageEntry,
      originalEntry
    });
  }

  return storageEntry;
};
|
|
1007
|
+
|
|
1008
|
+
/**
 * Fetch every stored revision for each of the given entry ids.
 *
 * @param {Object} model - Content model the entries belong to.
 * @param {Object} params - `{ ids }` list of entry ids.
 * @returns {Promise<Object[]>} All revisions for the requested entries.
 */
const getAllRevisionsByIds = async (model, params) => {
  const { ids } = params;
  return dataLoaders.getAllEntryRevisions({ model, ids });
};
|
|
1014
|
+
|
|
1015
|
+
/**
 * Fetch the latest revision of a single entry.
 *
 * @param {Object} model - Content model the entry belongs to.
 * @param {Object} params - `{ id }` of the entry.
 * @returns {Promise<Object|null>} The latest revision, or null when none exists.
 */
const getLatestRevisionByEntryId = async (model, params) => {
  const revisions = await dataLoaders.getLatestRevisionByEntryId({
    model,
    ids: [params.id]
  });
  // The loader returns a list; a single id yields at most one record.
  return revisions.length > 0 ? revisions[0] : null;
};
|
|
1027
|
+
|
|
1028
|
+
/**
 * Fetch the published revision of a single entry.
 *
 * @param {Object} model - Content model the entry belongs to.
 * @param {Object} params - `{ id }` of the entry.
 * @returns {Promise<Object|null>} The published revision, or null when none exists.
 */
const getPublishedRevisionByEntryId = async (model, params) => {
  const revisions = await dataLoaders.getPublishedRevisionByEntryId({
    model,
    ids: [params.id]
  });
  // The loader returns a list; a single id yields at most one record.
  return revisions.length > 0 ? revisions[0] : null;
};
|
|
1040
|
+
|
|
1041
|
+
/**
 * Fetch a single, specific revision by its full revision id.
 *
 * @param {Object} model - Content model the entry belongs to.
 * @param {Object} params - `{ id }` revision id.
 * @returns {Promise<Object|null>} The revision, or null when it does not exist.
 */
const getRevisionById = async (model, params) => {
  const revisions = await dataLoaders.getRevisionById({
    model,
    ids: [params.id]
  });
  // The loader returns a list; a single id yields at most one record.
  return revisions.length > 0 ? revisions[0] : null;
};
|
|
1053
|
+
|
|
1054
|
+
/**
 * Fetch all revisions of a single entry.
 *
 * @param {Object} model - Content model the entry belongs to.
 * @param {Object} params - `{ id }` of the entry.
 * @returns {Promise<Object[]>} Every stored revision of the entry.
 */
const getRevisions = async (model, params) => {
  const { id } = params;
  return dataLoaders.getAllEntryRevisions({ model, ids: [id] });
};
|
|
1060
|
+
|
|
1061
|
+
/**
 * Fetch multiple specific revisions by their full revision ids.
 *
 * @param {Object} model - Content model the entries belong to.
 * @param {Object} params - `{ ids }` revision ids.
 * @returns {Promise<Object[]>} The matching revisions.
 */
const getByIds = async (model, params) => {
  const { ids } = params;
  return dataLoaders.getRevisionById({ model, ids });
};
|
|
1067
|
+
|
|
1068
|
+
/**
 * Fetch the latest revision for each of the given entry ids.
 *
 * @param {Object} model - Content model the entries belong to.
 * @param {Object} params - `{ ids }` entry ids.
 * @returns {Promise<Object[]>} Latest revisions of the requested entries.
 */
const getLatestByIds = async (model, params) => {
  const { ids } = params;
  return dataLoaders.getLatestRevisionByEntryId({ model, ids });
};
|
|
1074
|
+
|
|
1075
|
+
/**
 * Fetch the published revision for each of the given entry ids.
 *
 * @param {Object} model - Content model the entries belong to.
 * @param {Object} params - `{ ids }` entry ids.
 * @returns {Promise<Object[]>} Published revisions of the requested entries.
 */
const getPublishedByIds = async (model, params) => {
  const { ids } = params;
  return dataLoaders.getPublishedRevisionByEntryId({ model, ids });
};
|
|
1081
|
+
|
|
1082
|
+
/**
 * Find the revision immediately preceding the given version of an entry.
 *
 * Queries the regular DynamoDB table for REV# records of the entry, in reverse
 * sort-key order, below the given version — the first match is the previous
 * revision.
 *
 * @param {Object} model - Content model; `tenant` and `locale` are read from it
 *                         to build the partition key.
 * @param {Object} params - `{ entryId, version }` — entry id and the version to
 *                          look before.
 * @returns {Promise<Object|null>} The previous revision, cleaned of
 *                                 DynamoDB-specific attributes (via cleanupItem).
 * @throws {WebinyError} GET_PREVIOUS_VERSION_ERROR when the query fails.
 */
const getPreviousRevision = async (model, params) => {
  const {
    tenant,
    locale
  } = model;
  const {
    entryId,
    version
  } = params;
  const queryParams = {
    entity,
    partitionKey: (0, _keys.createPartitionKey)({
      tenant,
      locale,
      id: entryId
    }),
    options: {
      // Sort-key upper bound: versions are zero-padded so string comparison
      // matches numeric ordering.
      lt: `REV#${(0, _utils.zeroPad)(version)}`,

      /**
       * We need to have extra checks because DynamoDB will return published or latest record if there is no REV# record.
       */
      filters: [{
        attr: "TYPE",
        eq: createType()
      }, {
        attr: "version",
        lt: version
      }],
      // Reverse order so the first match is the highest version below `version`.
      reverse: true
    }
  };

  try {
    const result = await (0, _query.queryOne)(queryParams);
    return (0, _cleanup.cleanupItem)(entity, result);
  } catch (ex) {
    throw new _error.default(ex.message || "Could not get previous version of given entry.", ex.code || "GET_PREVIOUS_VERSION_ERROR", _objectSpread(_objectSpread({}, params), {}, {
      error: ex,
      partitionKey: queryParams.partitionKey,
      options: queryParams.options,
      model
    }));
  }
};
|
|
1127
|
+
|
|
1128
|
+
return {
|
|
1129
|
+
create,
|
|
1130
|
+
createRevisionFrom,
|
|
1131
|
+
update,
|
|
1132
|
+
delete: deleteEntry,
|
|
1133
|
+
deleteRevision,
|
|
1134
|
+
get,
|
|
1135
|
+
publish,
|
|
1136
|
+
unpublish,
|
|
1137
|
+
requestReview,
|
|
1138
|
+
requestChanges,
|
|
1139
|
+
list,
|
|
1140
|
+
getAllRevisionsByIds,
|
|
1141
|
+
getLatestRevisionByEntryId,
|
|
1142
|
+
getPublishedRevisionByEntryId,
|
|
1143
|
+
getRevisionById,
|
|
1144
|
+
getRevisions,
|
|
1145
|
+
getByIds,
|
|
1146
|
+
getLatestByIds,
|
|
1147
|
+
getPublishedByIds,
|
|
1148
|
+
getPreviousRevision
|
|
1149
|
+
};
|
|
1150
|
+
};
|
|
1151
|
+
|
|
1152
|
+
exports.createEntriesStorageOperations = createEntriesStorageOperations;
|