@knocklabs/cli 0.1.6 → 0.1.8
This diff shows the content changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +74 -30
- package/dist/commands/commit/get.js +76 -0
- package/dist/commands/commit/list.js +99 -0
- package/dist/commands/commit/promote.js +36 -3
- package/dist/commands/layout/list.js +2 -2
- package/dist/commands/layout/pull.js +3 -3
- package/dist/commands/layout/push.js +2 -2
- package/dist/commands/translation/list.js +2 -2
- package/dist/commands/translation/pull.js +2 -2
- package/dist/commands/workflow/list.js +2 -2
- package/dist/commands/workflow/new.js +2 -2
- package/dist/commands/workflow/pull.js +3 -3
- package/dist/commands/workflow/push.js +2 -2
- package/dist/lib/api-v1.js +34 -18
- package/dist/lib/helpers/{object.js → object.isomorphic.js} +8 -1
- package/dist/lib/marshal/commit/helpers.js +13 -0
- package/dist/lib/marshal/commit/index.js +17 -0
- package/dist/lib/marshal/commit/types.js +5 -0
- package/dist/lib/marshal/email-layout/reader.js +7 -6
- package/dist/lib/marshal/email-layout/writer.js +12 -12
- package/dist/lib/marshal/shared/const.isomorphic.js +25 -0
- package/dist/lib/marshal/shared/helpers.js +0 -4
- package/dist/lib/marshal/workflow/generator.js +8 -8
- package/dist/lib/marshal/workflow/helpers.js +3 -6
- package/dist/lib/marshal/workflow/index.js +1 -0
- package/dist/lib/marshal/workflow/processor.isomorphic.js +272 -0
- package/dist/lib/marshal/workflow/reader.js +11 -9
- package/dist/lib/marshal/workflow/writer.js +7 -238
- package/oclif.manifest.json +111 -4
- package/package.json +8 -8
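
The thread running through these files is that the workflow directory processing logic now lives in browser-safe `.isomorphic` modules (`processor.isomorphic.js`, `object.isomorphic.js`, `const.isomorphic.js`) rather than in the Node-only reader/writer modules. The convention this code revolves around: an extractable field in `workflow.json` can be replaced by a reference to an extracted file, marked by a suffix on the field name (the `@` suffix, per the comments in the diff below). Here is a hypothetical example of what a marked-up `workflow.json` looks like; the step and field names are invented for illustration:

```js
// Hypothetical workflow.json contents; "email_1" and "text_body" are
// illustrative names, not taken from the package.
const workflowJson = {
  key: "welcome",
  steps: [
    {
      ref: "email_1",
      type: "channel",
      template: {
        subject: "Welcome!",
        // Extracted field: the "@" marker means the content lives in this
        // file, relative to workflow.json.
        "text_body@": "email_1/text_body.txt",
      },
    },
  ],
};
```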
package/dist/lib/marshal/workflow/processor.isomorphic.js

@@ -0,0 +1,272 @@
+/*
+ * IMPORTANT:
+ *
+ * This file is suffixed with `.isomorphic` because the code in this file is
+ * meant to run not just in a nodejs environment but also in a browser. For this
+ * reason there are some restrictions for which nodejs imports are allowed in
+ * this module. See `.eslintrc.json` for more details.
+ */ "use strict";
+Object.defineProperty(exports, "__esModule", {
+    value: true
+});
+function _export(target, all) {
+    for(var name in all)Object.defineProperty(target, name, {
+        enumerable: true,
+        get: all[name]
+    });
+}
+_export(exports, {
+    WORKFLOW_JSON: ()=>WORKFLOW_JSON,
+    VISUAL_BLOCKS_JSON: ()=>VISUAL_BLOCKS_JSON,
+    buildWorkflowDirBundle: ()=>buildWorkflowDirBundle,
+    formatExtractedFilePath: ()=>formatExtractedFilePath,
+    toWorkflowJson: ()=>toWorkflowJson
+});
+const _nodePath = /*#__PURE__*/ _interopRequireWildcard(require("node:path"));
+const _lodash = require("lodash");
+const _objectIsomorphic = require("../../helpers/object.isomorphic");
+const _constIsomorphic = require("../shared/const.isomorphic");
+const _types = require("./types");
+function _getRequireWildcardCache(nodeInterop) {
+    if (typeof WeakMap !== "function") return null;
+    var cacheBabelInterop = new WeakMap();
+    var cacheNodeInterop = new WeakMap();
+    return (_getRequireWildcardCache = function(nodeInterop) {
+        return nodeInterop ? cacheNodeInterop : cacheBabelInterop;
+    })(nodeInterop);
+}
+function _interopRequireWildcard(obj, nodeInterop) {
+    if (!nodeInterop && obj && obj.__esModule) {
+        return obj;
+    }
+    if (obj === null || typeof obj !== "object" && typeof obj !== "function") {
+        return {
+            default: obj
+        };
+    }
+    var cache = _getRequireWildcardCache(nodeInterop);
+    if (cache && cache.has(obj)) {
+        return cache.get(obj);
+    }
+    var newObj = {};
+    var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor;
+    for(var key in obj){
+        if (key !== "default" && Object.prototype.hasOwnProperty.call(obj, key)) {
+            var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null;
+            if (desc && (desc.get || desc.set)) {
+                Object.defineProperty(newObj, key, desc);
+            } else {
+                newObj[key] = obj[key];
+            }
+        }
+    }
+    newObj.default = obj;
+    if (cache) {
+        cache.set(obj, newObj);
+    }
+    return newObj;
+}
+const WORKFLOW_JSON = "workflow.json";
+const VISUAL_BLOCKS_JSON = "visual_blocks.json";
+/*
+ * Sanitize the workflow content into a format that's appropriate for reading
+ * and writing, by stripping out any annotation fields and handling readonly
+ * fields.
+ */ const toWorkflowJson = (workflow)=>{
+    var _workflow___annotation;
+    // Move read only fields of a workflow under the dedicated field "__readonly".
+    const readonlyFields = ((_workflow___annotation = workflow.__annotation) === null || _workflow___annotation === void 0 ? void 0 : _workflow___annotation.readonly_fields) || [];
+    const [readonly, remainder] = (0, _objectIsomorphic.split)(workflow, readonlyFields);
+    const worklfowJson = {
+        ...remainder,
+        __readonly: readonly
+    };
+    // Strip out all schema annotations, so not to expose them to end users.
+    return (0, _objectIsomorphic.omitDeep)(worklfowJson, [
+        "__annotation"
+    ]);
+};
+const formatExtractedFilePath = (objPathParts, fileExt, opts = {})=>{
+    const { unnestDirsBy =0 , nestIntoDirs =[] } = opts;
+    // 1. Unnest the obj path parts by the given depths, if the option is given.
+    const maxUnnestableDepth = Math.min(Math.max(objPathParts.length - 1, 0), unnestDirsBy);
+    const unnestedObjPathParts = objPathParts.slice(maxUnnestableDepth, objPathParts.length);
+    // 2. Build the file path parts based on the object path parts.
+    const filePathParts = [];
+    let arrayIndexNums = [];
+    for (const part of unnestedObjPathParts){
+        if (typeof part === "string" && arrayIndexNums.length > 0) {
+            filePathParts.push([
+                ...arrayIndexNums,
+                part
+            ].join("."));
+            arrayIndexNums = [];
+            continue;
+        }
+        if (typeof part === "string") {
+            filePathParts.push(part);
+            continue;
+        }
+        if (typeof part === "number") {
+            arrayIndexNums.push(part + 1);
+            continue;
+        }
+    }
+    if (arrayIndexNums.length > 0) {
+        filePathParts.push(arrayIndexNums.join("."));
+    }
+    // 3. Format the final file path out based on the file path parts. Nest it
+    // under the directories if the option is given.
+    const fileName = filePathParts.pop();
+    const paths = [
+        ...nestIntoDirs,
+        ...filePathParts,
+        `${fileName}.${fileExt}`
+    ];
+    return _nodePath.join(...paths).toLowerCase();
+};
+const NON_RECURSIVELY_TRAVERSABLE_FIELDS_FOR_EXTRACTION = new Set([
+    "branches"
+]);
+const compileExtractionSettings = (node, objPathParts = [])=>{
+    const map = new Map();
+    const compileRecursively = (item, parts)=>{
+        if ((0, _lodash.isPlainObject)(item)) {
+            const extractableFields = (0, _lodash.get)(item, [
+                "__annotation",
+                "extractable_fields"
+            ], {});
+            for (const [key, val] of Object.entries(item)){
+                // If the field we are on is extractable, then add its extraction
+                // settings to the map with the current object path.
+                if (key in extractableFields) {
+                    map.set([
+                        ...parts,
+                        key
+                    ], extractableFields[key]);
+                }
+                // Recursively exam current field for any additionally extractable data
+                // within, except for disallowed fields
+                if (!NON_RECURSIVELY_TRAVERSABLE_FIELDS_FOR_EXTRACTION.has(key)) {
+                    compileRecursively(val, [
+                        ...parts,
+                        key
+                    ]);
+                }
+            }
+            return;
+        }
+        if (Array.isArray(item)) {
+            item.map((val, idx)=>compileRecursively(val, [
+                    ...parts,
+                    idx
+                ]));
+        }
+    };
+    // Walk the node tree and compile all extractable fields by object path.
+    compileRecursively(node, objPathParts);
+    // Sort the compiled entries in desc order by the object path length, so the
+    // deepest nested fields come first and the top most fields come last because
+    // this is the order we should be extracting and replacing field contents.
+    return new Map([
+        ...map
+    ].sort((a, b)=>{
+        const aLength = a[0].length;
+        const bLength = b[0].length;
+        if (aLength < bLength) return 1;
+        if (aLength > bLength) return -1;
+        return 0;
+    }));
+};
+const keyLocalWorkflowStepsByRef = (steps, result = {})=>{
+    if (!Array.isArray(steps)) return result;
+    for (const step of steps){
+        if (!(0, _lodash.isPlainObject)(step)) continue;
+        if (!step.ref) continue;
+        result[step.ref] = step;
+        if (step.type === _types.StepType.Branch && Array.isArray(step.branches)) {
+            for (const branch of step.branches){
+                if (!(0, _lodash.isPlainObject)(branch)) continue;
+                result = keyLocalWorkflowStepsByRef(branch.steps, result);
+            }
+        }
+    }
+    return result;
+};
+const recursivelyBuildWorkflowDirBundle = (bundle, steps, localWorkflowStepsByRef)=>{
+    for (const step of steps){
+        // A compiled map of extraction settings of every field in the step where
+        // we support content extraction, organized by each field's object path.
+        const compiledExtractionSettings = compileExtractionSettings(step);
+        // Iterate through each extractable field, determine whether we need to
+        // extract the field content in the remote workflow, and if so, perform the
+        // extraction. Note, this compiled map is ordered by the deepest nested to
+        // the top most fields, so that more than one extraction is possible.
+        for (const [objPathParts, extractionSettings] of compiledExtractionSettings){
+            // If this step doesn't have this object path, then it's not relevant so
+            // nothing more to do here.
+            if (!(0, _lodash.has)(step, objPathParts)) continue;
+            // If the field at this path is extracted in the local workflow, then
+            // always extract; otherwise extract based on the field settings default.
+            const objPathStr = _objectIsomorphic.ObjPath.stringify(objPathParts);
+            const extractedFilePath = (0, _lodash.get)(localWorkflowStepsByRef, `${step.ref}.${objPathStr}${_constIsomorphic.FILEPATH_MARKER}`);
+            const { default: extractByDefault , file_ext: fileExt } = extractionSettings;
+            if (!extractedFilePath && !extractByDefault) continue;
+            // By this point, we have a field where we need to extract its content.
+            // First figure out the relative file path (within the workflow directory)
+            // for the extracted file. If already extracted in the local workflow,
+            // then use that; otherwise format a new file path.
+            const relpath = typeof extractedFilePath === "string" ? extractedFilePath : formatExtractedFilePath(objPathParts, fileExt, {
+                unnestDirsBy: 1,
+                nestIntoDirs: [
+                    step.ref
+                ]
+            });
+            // In case we are about to extract a field that has children rather than
+            // string content (e.g. visual blocks), prepare the data to strip out any
+            // annotations.
+            let data = (0, _objectIsomorphic.omitDeep)((0, _lodash.get)(step, objPathParts), [
+                "__annotation"
+            ]);
+            // Also, if the extractable data contains extracted file paths in itself
+            // then rebase those file paths to be relative to its referenced file.
+            data = (0, _objectIsomorphic.mapValuesDeep)(data, (value, key)=>{
+                if (!_constIsomorphic.FILEPATH_MARKED_RE.test(key)) return value;
+                const rebaseRootDir = _nodePath.dirname(relpath);
+                const rebasedFilePath = _nodePath.relative(rebaseRootDir, value);
+                return rebasedFilePath;
+            });
+            const content = typeof data === "string" ? data : JSON.stringify(data, null, 2);
+            // Perform the extraction by adding the content and its file path to the
+            // bundle for writing to the file system later. Then replace the field
+            // content with the extracted file path and mark the field as extracted
+            // with @ suffix.
+            //
+            // TODO: Consider guarding against an edge case, and check if the relpath
+            // already exists in the bundle, and if so make the relpath unique.
+            (0, _lodash.set)(bundle, [
+                relpath
+            ], content);
+            (0, _lodash.set)(step, `${objPathStr}${_constIsomorphic.FILEPATH_MARKER}`, relpath);
+            (0, _lodash.unset)(step, objPathParts);
+        }
+        // Lastly, recurse thru any branches that exist in the workflow tree
+        if (step.type === _types.StepType.Branch) {
+            for (const branch of step.branches){
+                recursivelyBuildWorkflowDirBundle(bundle, branch.steps, localWorkflowStepsByRef);
+            }
+        }
+    }
+};
+const buildWorkflowDirBundle = (remoteWorkflow, localWorkflow = {})=>{
+    const bundle = {};
+    const mutWorkflow = (0, _lodash.cloneDeep)(remoteWorkflow);
+    const localWorkflowStepsByRef = keyLocalWorkflowStepsByRef(localWorkflow.steps);
+    // Recursively traverse the workflow step tree, mutating it and the bundle
+    // along the way
+    recursivelyBuildWorkflowDirBundle(bundle, mutWorkflow.steps, localWorkflowStepsByRef);
+    // Then, prepare the workflow data to be written into a workflow json file.
+    return (0, _lodash.set)(bundle, [
+        WORKFLOW_JSON
+    ], toWorkflowJson(mutWorkflow));
+};
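
To make the path-building rules concrete, here is a hedged sketch of what `formatExtractedFilePath` and `buildWorkflowDirBundle` produce for a hypothetical workflow. The annotation shape follows the `__annotation.extractable_fields` lookup in `compileExtractionSettings` above; the field names and values are invented, the deep `require` path assumes the dist module is directly requirable, and the exact `__readonly` contents depend on the `split` helper:

```js
const {
  buildWorkflowDirBundle,
  formatExtractedFilePath,
} = require("@knocklabs/cli/dist/lib/marshal/workflow/processor.isomorphic");

// Array indices in the object path become 1-based segments, and the whole
// path is lowercased:
formatExtractedFilePath(["template", "visual_blocks", 0, "content"], "md", {
  unnestDirsBy: 1, // drop the leading "template" part
  nestIntoDirs: ["email_1"], // nest under the step ref
});
// => "email_1/visual_blocks/1.content.md"

// A remote workflow with one field extracted by default:
const remoteWorkflow = {
  key: "welcome",
  steps: [
    {
      ref: "email_1",
      type: "channel",
      template: {
        subject: "Welcome!",
        text_body: "Hello from Knock",
        __annotation: {
          extractable_fields: {
            text_body: { default: true, file_ext: "txt" },
            subject: { default: false, file_ext: "txt" },
          },
        },
      },
    },
  ],
};

const bundle = buildWorkflowDirBundle(remoteWorkflow);
// Expected shape, assuming FILEPATH_MARKER is the "@" suffix seen in the
// comments above:
// {
//   "email_1/text_body.txt": "Hello from Knock",
//   "workflow.json": {
//     key: "welcome",
//     steps: [{
//       ref: "email_1",
//       type: "channel",
//       template: { subject: "Welcome!", "text_body@": "email_1/text_body.txt" },
//     }],
//     __readonly: {},
//   },
// }
```

Note how `subject` stays inline: it has no local override and its extraction setting defaults to `false`, so the `if (!extractedFilePath && !extractByDefault) continue;` guard skips it.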
package/dist/lib/marshal/workflow/reader.js

@@ -18,9 +18,11 @@ const _fsExtra = /*#__PURE__*/ _interopRequireWildcard(require("fs-extra"));
 const _lodash = require("lodash");
 const _error = require("../../helpers/error");
 const _json = require("../../helpers/json");
-const _object = require("../../helpers/object");
+const _objectIsomorphic = require("../../helpers/object.isomorphic");
+const _constIsomorphic = require("../shared/const.isomorphic");
 const _helpers = require("../shared/helpers");
 const _helpers1 = require("./helpers");
+const _processorIsomorphic = require("./processor.isomorphic");
 function _getRequireWildcardCache(nodeInterop) {
     if (typeof WeakMap !== "function") return null;
     var cacheBabelInterop = new WeakMap();
@@ -80,21 +82,21 @@ const joinExtractedFiles = async (workflowDirCtx, workflowJson)=>{
     }).entries()){
         const currJoinedFilePaths = {};
         const prevJoinedFilePaths = joinedFilePathsPerLevel[idx - 1] || {};
-        (0, _object.mapValuesDeep)(workflowJson, (value, key, parts)=>{
+        (0, _objectIsomorphic.mapValuesDeep)(workflowJson, (value, key, parts)=>{
             // If not marked with the @ suffix, there's nothing to do.
-            if (!_helpers.FILEPATH_MARKED_RE.test(key)) return;
-            const objPathToFieldStr = _object.ObjPath.stringify(parts);
-            const inlinObjPathStr = objPathToFieldStr.replace(_helpers.FILEPATH_MARKED_RE, "");
+            if (!_constIsomorphic.FILEPATH_MARKED_RE.test(key)) return;
+            const objPathToFieldStr = _objectIsomorphic.ObjPath.stringify(parts);
+            const inlinObjPathStr = objPathToFieldStr.replace(_constIsomorphic.FILEPATH_MARKED_RE, "");
             // If there is inlined content present already, then nothing more to do.
             if ((0, _lodash.hasIn)(workflowJson, inlinObjPathStr)) return;
             // Check if the extracted path found at the current field path belongs to
             // a node whose parent or grandparent has been previously joined earlier
             // in the tree. If so, rebase the extracted path to be a relative path to
             // the workflow json.
-            const lastFound = (0, _object.getLastFound)(prevJoinedFilePaths, parts);
+            const lastFound = (0, _objectIsomorphic.getLastFound)(prevJoinedFilePaths, parts);
             const prevJoinedFilePath = typeof lastFound === "string" ? lastFound : undefined;
             const rebasedFilePath = prevJoinedFilePath ? _nodePath.join(_nodePath.dirname(prevJoinedFilePath), value) : value;
-            const invalidFilePathError = (0, _helpers.validateExtractedFilePath)(rebasedFilePath, _nodePath.resolve(workflowDirCtx.abspath, _helpers1.WORKFLOW_JSON), uniqueFilePaths, objPathToFieldStr);
+            const invalidFilePathError = (0, _helpers.validateExtractedFilePath)(rebasedFilePath, _nodePath.resolve(workflowDirCtx.abspath, _processorIsomorphic.WORKFLOW_JSON), uniqueFilePaths, objPathToFieldStr);
             if (invalidFilePathError) {
                 errors.push(invalidFilePathError);
                 // Wipe the invalid file path in the node so the final workflow json
@@ -143,7 +145,7 @@ const readWorkflowDir = async (workflowDirCtx, opts = {})=>{
     const result = await (0, _json.readJson)(workflowJsonPath);
     if (!result[0]) return result;
     let [workflowJson] = result;
-    workflowJson = withReadonlyField ? workflowJson : (0, _object.omitDeep)(workflowJson, [
+    workflowJson = withReadonlyField ? workflowJson : (0, _objectIsomorphic.omitDeep)(workflowJson, [
         "__readonly"
     ]);
     return withExtractedFiles ? joinExtractedFiles(workflowDirCtx, workflowJson) : [
@@ -161,7 +163,7 @@ const readWorkflowDir = async (workflowDirCtx, opts = {})=>{
         // eslint-disable-next-line no-await-in-loop
         const [workflow, readErrors] = await readWorkflowDir(workflowDirCtx, opts);
         if (readErrors.length > 0) {
-            const workflowJsonPath = _nodePath.resolve(workflowDirCtx.abspath, _helpers1.WORKFLOW_JSON);
+            const workflowJsonPath = _nodePath.resolve(workflowDirCtx.abspath, _processorIsomorphic.WORKFLOW_JSON);
             const e = new _error.SourceError((0, _error.formatErrors)(readErrors), workflowJsonPath);
             errors.push(e);
             continue;
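
The rebasing rule in `joinExtractedFiles` is easiest to see with concrete paths. A minimal sketch of the `_nodePath.join(_nodePath.dirname(prevJoinedFilePath), value)` step above, with hypothetical file names:

```js
const path = require("node:path");

// A parent node was joined earlier in the walk from this extracted file:
const prevJoinedFilePath = "email_1/visual_blocks.json";
// Inside that file, a child field points at its own extracted file,
// written relative to visual_blocks.json:
const value = "blocks.1.content.md";

// Rebase the child path so it is relative to workflow.json instead:
const rebasedFilePath = path.join(path.dirname(prevJoinedFilePath), value);
console.log(rebasedFilePath); // "email_1/blocks.1.content.md"
```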
package/dist/lib/marshal/workflow/writer.js

@@ -12,21 +12,16 @@ _export(exports, {
     writeWorkflowDirFromData: ()=>writeWorkflowDirFromData,
     writeWorkflowDirFromBundle: ()=>writeWorkflowDirFromBundle,
     writeWorkflowsIndexDir: ()=>writeWorkflowsIndexDir,
-    buildWorkflowDirBundle: ()=>buildWorkflowDirBundle,
-    formatExtractedFilePath: ()=>formatExtractedFilePath,
-    pruneWorkflowsIndexDir: ()=>pruneWorkflowsIndexDir,
-    toWorkflowJson: ()=>toWorkflowJson
+    pruneWorkflowsIndexDir: ()=>pruneWorkflowsIndexDir
 });
 const _nodePath = /*#__PURE__*/ _interopRequireWildcard(require("node:path"));
 const _fsExtra = /*#__PURE__*/ _interopRequireWildcard(require("fs-extra"));
 const _lodash = require("lodash");
 const _const = require("../../helpers/const");
 const _json = require("../../helpers/json");
-const _object = require("../../helpers/object");
-const _helpers = require("../shared/helpers");
-const _helpers1 = require("./helpers");
+const _helpers = require("./helpers");
+const _processorIsomorphic = require("./processor.isomorphic");
 const _reader = require("./reader");
-const _types = require("./types");
 function _getRequireWildcardCache(nodeInterop) {
     if (typeof WeakMap !== "function") return null;
     var cacheBabelInterop = new WeakMap();
@@ -66,239 +61,13 @@ function _interopRequireWildcard(obj, nodeInterop) {
     }
     return newObj;
 }
-/*
- * Sanitize the workflow content into a format that's appropriate for reading
- * and writing, by stripping out any annotation fields and handling readonly
- * fields.
- */ const toWorkflowJson = (workflow)=>{
-    var _workflow___annotation;
-    // Move read only fields of a workflow under the dedicated field "__readonly".
-    const readonlyFields = ((_workflow___annotation = workflow.__annotation) === null || _workflow___annotation === void 0 ? void 0 : _workflow___annotation.readonly_fields) || [];
-    const [readonly, remainder] = (0, _object.split)(workflow, readonlyFields);
-    const worklfowJson = {
-        ...remainder,
-        __readonly: readonly
-    };
-    // Strip out all schema annotations, so not to expose them to end users.
-    return (0, _object.omitDeep)(worklfowJson, [
-        "__annotation"
-    ]);
-};
-const formatExtractedFilePath = (objPathParts, fileExt, opts = {})=>{
-    const { unnestDirsBy =0 , nestIntoDirs =[] } = opts;
-    // 1. Unnest the obj path parts by the given depths, if the option is given.
-    const maxUnnestableDepth = Math.min(Math.max(objPathParts.length - 1, 0), unnestDirsBy);
-    const unnestedObjPathParts = objPathParts.slice(maxUnnestableDepth, objPathParts.length);
-    // 2. Build the file path parts based on the object path parts.
-    const filePathParts = [];
-    let arrayIndexNums = [];
-    for (const part of unnestedObjPathParts){
-        if (typeof part === "string" && arrayIndexNums.length > 0) {
-            filePathParts.push([
-                ...arrayIndexNums,
-                part
-            ].join("."));
-            arrayIndexNums = [];
-            continue;
-        }
-        if (typeof part === "string") {
-            filePathParts.push(part);
-            continue;
-        }
-        if (typeof part === "number") {
-            arrayIndexNums.push(part + 1);
-            continue;
-        }
-    }
-    if (arrayIndexNums.length > 0) {
-        filePathParts.push(arrayIndexNums.join("."));
-    }
-    // 3. Format the final file path out based on the file path parts. Nest it
-    // under the directories if the option is given.
-    const fileName = filePathParts.pop();
-    const paths = [
-        ...nestIntoDirs,
-        ...filePathParts,
-        `${fileName}.${fileExt}`
-    ];
-    return _nodePath.join(...paths).toLowerCase();
-};
-const NON_RECURSIVELY_TRAVERSABLE_FIELDS_FOR_EXTRACTION = new Set([
-    "branches"
-]);
-const compileExtractionSettings = (node, objPathParts = [])=>{
-    const map = new Map();
-    const compileRecursively = (item, parts)=>{
-        if ((0, _lodash.isPlainObject)(item)) {
-            const extractableFields = (0, _lodash.get)(item, [
-                "__annotation",
-                "extractable_fields"
-            ], {});
-            for (const [key, val] of Object.entries(item)){
-                // If the field we are on is extractable, then add its extraction
-                // settings to the map with the current object path.
-                if (key in extractableFields) {
-                    map.set([
-                        ...parts,
-                        key
-                    ], extractableFields[key]);
-                }
-                // Recursively exam current field for any additionally extractable data
-                // within, except for disallowed fields
-                if (!NON_RECURSIVELY_TRAVERSABLE_FIELDS_FOR_EXTRACTION.has(key)) {
-                    compileRecursively(val, [
-                        ...parts,
-                        key
-                    ]);
-                }
-            }
-            return;
-        }
-        if (Array.isArray(item)) {
-            item.map((val, idx)=>compileRecursively(val, [
-                    ...parts,
-                    idx
-                ]));
-        }
-    };
-    // Walk the node tree and compile all extractable fields by object path.
-    compileRecursively(node, objPathParts);
-    // Sort the compiled entries in desc order by the object path length, so the
-    // deepest nested fields come first and the top most fields come last because
-    // this is the order we should be extracting and replacing field contents.
-    return new Map([
-        ...map
-    ].sort((a, b)=>{
-        const aLength = a[0].length;
-        const bLength = b[0].length;
-        if (aLength < bLength) return 1;
-        if (aLength > bLength) return -1;
-        return 0;
-    }));
-};
-const keyLocalWorkflowStepsByRef = (steps, result = {})=>{
-    if (!Array.isArray(steps)) return result;
-    for (const step of steps){
-        if (!(0, _lodash.isPlainObject)(step)) continue;
-        if (!step.ref) continue;
-        result[step.ref] = step;
-        if (step.type === _types.StepType.Branch && Array.isArray(step.branches)) {
-            for (const branch of step.branches){
-                if (!(0, _lodash.isPlainObject)(branch)) continue;
-                result = keyLocalWorkflowStepsByRef(branch.steps, result);
-            }
-        }
-    }
-    return result;
-};
-const recursivelyBuildWorkflowDirBundle = (bundle, steps, localWorkflowStepsByRef)=>{
-    for (const step of steps){
-        // A compiled map of extraction settings of every field in the step where
-        // we support content extraction, organized by each field's object path.
-        const compiledExtractionSettings = compileExtractionSettings(step);
-        // Iterate through each extractable field, determine whether we need to
-        // extract the field content in the remote workflow, and if so, perform the
-        // extraction. Note, this compiled map is ordered by the deepest nested to
-        // the top most fields, so that more than one extraction is possible.
-        for (const [objPathParts, extractionSettings] of compiledExtractionSettings){
-            // If this step doesn't have this object path, then it's not relevant so
-            // nothing more to do here.
-            if (!(0, _lodash.has)(step, objPathParts)) continue;
-            // If the field at this path is extracted in the local workflow, then
-            // always extract; otherwise extract based on the field settings default.
-            const objPathStr = _object.ObjPath.stringify(objPathParts);
-            const extractedFilePath = (0, _lodash.get)(localWorkflowStepsByRef, `${step.ref}.${objPathStr}${_helpers.FILEPATH_MARKER}`);
-            const { default: extractByDefault , file_ext: fileExt } = extractionSettings;
-            if (!extractedFilePath && !extractByDefault) continue;
-            // By this point, we have a field where we need to extract its content.
-            // First figure out the relative file path (within the workflow directory)
-            // for the extracted file. If already extracted in the local workflow,
-            // then use that; otherwise format a new file path.
-            const relpath = typeof extractedFilePath === "string" ? extractedFilePath : formatExtractedFilePath(objPathParts, fileExt, {
-                unnestDirsBy: 1,
-                nestIntoDirs: [
-                    step.ref
-                ]
-            });
-            // In case we are about to extract a field that has children rather than
-            // string content (e.g. visual blocks), prepare the data to strip out any
-            // annotations.
-            let data = (0, _object.omitDeep)((0, _lodash.get)(step, objPathParts), [
-                "__annotation"
-            ]);
-            // Also, if the extractable data contains extracted file paths in itself
-            // then rebase those file paths to be relative to its referenced file.
-            data = (0, _object.mapValuesDeep)(data, (value, key)=>{
-                if (!_helpers.FILEPATH_MARKED_RE.test(key)) return value;
-                const rebaseRootDir = _nodePath.dirname(relpath);
-                const rebasedFilePath = _nodePath.relative(rebaseRootDir, value);
-                return rebasedFilePath;
-            });
-            const content = typeof data === "string" ? data : JSON.stringify(data, null, 2);
-            // Perform the extraction by adding the content and its file path to the
-            // bundle for writing to the file system later. Then replace the field
-            // content with the extracted file path and mark the field as extracted
-            // with @ suffix.
-            //
-            // TODO: Consider guarding against an edge case, and check if the relpath
-            // already exists in the bundle, and if so make the relpath unique.
-            (0, _lodash.set)(bundle, [
-                relpath
-            ], content);
-            (0, _lodash.set)(step, `${objPathStr}${_helpers.FILEPATH_MARKER}`, relpath);
-            (0, _lodash.unset)(step, objPathParts);
-        }
-        // Lastly, recurse thru any branches that exist in the workflow tree
-        if (step.type === _types.StepType.Branch) {
-            for (const branch of step.branches){
-                recursivelyBuildWorkflowDirBundle(bundle, branch.steps, localWorkflowStepsByRef);
-            }
-        }
-    }
-};
-/*
- * For a given workflow payload (and its local workflow reference), this function
- * builds a "workflow directory bundle", which is an obj made up of all the
- * relative file paths (within the workflow directory) and its file content to
- * write the workflow directory.
- *
- * Every workflow will always have a workflow.json file, so every bundle includes
- * it and its content at minimum. To the extent the workflow includes any
- * extractable fields, those fields content get extracted out and added to the
- * bundle.
- *
- * Important things to keep in mind re: content extraction:
- * 1. There can be multiple places in workflow json where content extraction
- *    happens.
- * 2. There can be multiple levels of content extraction happening, currently
- *    at a maximum of 2 levels.
- *
- * The way this function works and handles the content extraction is by:
- * 1. Traversing the given step node, and compiling all annotated extraction
- *    settings by the object path in the node *ordered from leaf to root*.
- * 2. Iterate over compiled extraction settings from leaf to root, and start
- *    extracting out the field as needed. In case the node that needs to be
- *    extracted out contains extracted file paths, then those file paths get
- *    rebased to relative to the referenced file.
- */ const buildWorkflowDirBundle = (remoteWorkflow, localWorkflow = {})=>{
-    const bundle = {};
-    const mutWorkflow = (0, _lodash.cloneDeep)(remoteWorkflow);
-    const localWorkflowStepsByRef = keyLocalWorkflowStepsByRef(localWorkflow.steps);
-    // Recursively traverse the workflow step tree, mutating it and the bundle
-    // along the way
-    recursivelyBuildWorkflowDirBundle(bundle, mutWorkflow.steps, localWorkflowStepsByRef);
-    // Then, prepare the workflow data to be written into a workflow json file.
-    return (0, _lodash.set)(bundle, [
-        _helpers1.WORKFLOW_JSON
-    ], toWorkflowJson(mutWorkflow));
-};
 const writeWorkflowDirFromData = async (workflowDirCtx, remoteWorkflow)=>{
     // If the workflow directory exists on the file system (i.e. previously
     // pulled before), then read the workflow file to use as a reference.
     const [localWorkflow] = workflowDirCtx.exists ? await (0, _reader.readWorkflowDir)(workflowDirCtx, {
         withExtractedFiles: true
     }) : [];
-    const bundle = buildWorkflowDirBundle(remoteWorkflow, localWorkflow);
+    const bundle = (0, _processorIsomorphic.buildWorkflowDirBundle)(remoteWorkflow, localWorkflow);
     return writeWorkflowDirFromBundle(workflowDirCtx, bundle);
 };
 const writeWorkflowDirFromBundle = async (workflowDirCtx, workflowDirBundle)=>{
@@ -310,7 +79,7 @@ const writeWorkflowDirFromBundle = async (workflowDirCtx, workflowDirBundle)=>{
     }
     const promises = Object.entries(workflowDirBundle).map(([relpath, fileContent])=>{
         const filePath = _nodePath.resolve(workflowDirCtx.abspath, relpath);
-        return relpath === _helpers1.WORKFLOW_JSON ? _fsExtra.outputJson(filePath, fileContent, {
+        return relpath === _processorIsomorphic.WORKFLOW_JSON ? _fsExtra.outputJson(filePath, fileContent, {
             spaces: _json.DOUBLE_SPACES
         }) : _fsExtra.outputFile(filePath, fileContent);
     });
@@ -346,7 +115,7 @@ const writeWorkflowDirFromBundle = async (workflowDirCtx, workflowDirBundle)=>{
     const promises = dirents.map(async (dirent)=>{
         const direntName = dirent.name.toLowerCase();
        const direntPath = _nodePath.resolve(indexDirCtx.abspath, direntName);
-        if (await (0, _helpers1.isWorkflowDir)(direntPath) && workflowsByKey[direntName]) {
+        if (await (0, _helpers.isWorkflowDir)(direntPath) && workflowsByKey[direntName]) {
            return;
        }
        await _fsExtra.remove(direntPath);
@@ -369,7 +138,7 @@ const writeWorkflowsIndexDir = async (indexDirCtx, remoteWorkflows)=>{
             type: "workflow",
             key: workflow.key,
             abspath: workflowDirPath,
-            exists: indexDirCtx.exists ? await (0, _helpers1.isWorkflowDir)(workflowDirPath) : false
+            exists: indexDirCtx.exists ? await (0, _helpers.isWorkflowDir)(workflowDirPath) : false
         };
         return writeWorkflowDirFromData(workflowDirCtx, workflow);
     });
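
For reference, the write path that consumes a bundle is unchanged apart from where `WORKFLOW_JSON` now comes from. A condensed sketch of the logic in the hunks above, not the package's exact code (`workflowDirAbspath` is a hypothetical argument standing in for `workflowDirCtx.abspath`):

```js
const path = require("node:path");
const fsExtra = require("fs-extra");

const WORKFLOW_JSON = "workflow.json";

async function writeBundle(workflowDirAbspath, bundle) {
  await Promise.all(
    Object.entries(bundle).map(([relpath, fileContent]) => {
      const filePath = path.resolve(workflowDirAbspath, relpath);
      // workflow.json is written as formatted JSON; every other bundle
      // entry (extracted field content) is written verbatim.
      return relpath === WORKFLOW_JSON
        ? fsExtra.outputJson(filePath, fileContent, { spaces: 2 })
        : fsExtra.outputFile(filePath, fileContent);
    }),
  );
}
```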